/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
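/* The *_ORDER values appear to encode the descriptor cache size as
 * entries = 8 << order (16 = 8 << 1, 64 = 8 << 3); if so, it is the
 * order, not the raw entry count, that the hardware expects in its
 * descriptor-cache size fields.
 */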

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
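/* Worked example: EFX_CHANNEL_MAGIC_TEST on channel 3 encodes as
 * (0x000101 << 8) | 3 == 0x00010103. _EFX_CHANNEL_MAGIC_CODE() then
 * recovers 0x000101, which is how efx_handle_generated_event() can
 * distinguish event types without knowing the queue number.
 */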

static void efx_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
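/* (Unwritten entries never look like valid events because the queue
 * memory is initialised to all ones; see the memset(..., 0xff, ...) in
 * efx_alloc_special_buffer() and efx_nic_init_eventq().)
 */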

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
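/* (The value written is the write count masked to the ring size --
 * ptr_mask is entries - 1 -- so a single dword doorbell write tells the
 * NIC how far it may fetch descriptors.)
 */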

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
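/* (empty_read_count records the write count observed when the queue last
 * drained, tagged with EFX_EMPTY_COUNT_VALID. XOR-ing it against the
 * pre-batch write count and masking off the tag yields zero only if
 * nothing has been written since the queue emptied; in that case the
 * first new descriptor can be pushed along with the doorbell, saving
 * the NIC a descriptor fetch.)
 */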

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: if the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
1075static void
1076efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1077{
1078 struct efx_channel *channel;
1079 struct efx_rx_queue *rx_queue;
1080 int qid;
1081 bool failed;
1082
1083 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1084 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1085 if (qid >= efx->n_channels)
1086 return;
1087 channel = efx_get_channel(efx, qid);
1088 if (!efx_channel_has_rx_queue(channel))
1089 return;
1090 rx_queue = efx_channel_get_rx_queue(channel);
1091
1092 if (failed) {
1093 netif_info(efx, hw, efx->net_dev,
1094 "RXQ %d flush retry\n", qid);
1095 rx_queue->flush_pending = true;
1096 atomic_inc(&efx->rxq_flush_pending);
1097 } else {
1098 efx_magic_event(efx_rx_queue_channel(rx_queue),
1099 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1100 }
1101 atomic_dec(&efx->rxq_flush_outstanding);
1102 if (efx_flush_wake(efx))
1103 wake_up(&efx->flush_wq);
1104}
1105
1106static void
1107efx_handle_drain_event(struct efx_channel *channel)
1108{
1109 struct efx_nic *efx = channel->efx;
1110
1111 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1112 atomic_dec(&efx->drain_pending);
1113 if (efx_flush_wake(efx))
1114 wake_up(&efx->flush_wq);
1115}
1116
Steve Hodgson90d683a2010-06-01 11:19:39 +00001117static void
1118efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1119{
1120 struct efx_nic *efx = channel->efx;
Ben Hutchings2ae75da2012-02-07 23:49:52 +00001121 struct efx_rx_queue *rx_queue =
1122 efx_channel_has_rx_queue(channel) ?
1123 efx_channel_get_rx_queue(channel) : NULL;
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001124 unsigned magic, code;
Steve Hodgson90d683a2010-06-01 11:19:39 +00001125
Ben Hutchings4ef594e2012-02-07 23:39:18 +00001126 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001127 code = _EFX_CHANNEL_MAGIC_CODE(magic);
Ben Hutchings4ef594e2012-02-07 23:39:18 +00001128
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001129 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
Ben Hutchingsdd407812012-02-28 23:40:21 +00001130 channel->event_test_cpu = raw_smp_processor_id();
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001131 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
Steve Hodgson90d683a2010-06-01 11:19:39 +00001132 /* The queue must be empty, so we won't receive any rx
1133 * events, so efx_process_channel() won't refill the
1134 * queue. Refill it here */
Ben Hutchings2ae75da2012-02-07 23:49:52 +00001135 efx_fast_push_rx_descriptors(rx_queue);
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001136 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1137 rx_queue->enabled = false;
1138 efx_handle_drain_event(channel);
1139 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1140 efx_handle_drain_event(channel);
1141 } else {
Ben Hutchings62776d02010-06-23 11:30:07 +00001142 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1143 "generated event "EFX_QWORD_FMT"\n",
1144 channel->channel, EFX_QWORD_VAL(*event));
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001145 }
Steve Hodgson90d683a2010-06-01 11:19:39 +00001146}
1147
Ben Hutchings8e730c12009-11-29 15:14:45 +00001148static void
1149efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1150{
1151 struct efx_nic *efx = channel->efx;
1152 unsigned int ev_sub_code;
1153 unsigned int ev_sub_data;
1154
1155 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1156 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1157
1158 switch (ev_sub_code) {
1159 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001160 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1161 channel->channel, ev_sub_data);
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001162 efx_handle_tx_flush_done(efx, event);
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001163 efx_sriov_tx_flush_done(efx, event);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001164 break;
1165 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001166 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1167 channel->channel, ev_sub_data);
Ben Hutchings9f2cb712012-02-08 00:11:20 +00001168 efx_handle_rx_flush_done(efx, event);
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001169 efx_sriov_rx_flush_done(efx, event);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001170 break;
1171 case FSE_AZ_EVQ_INIT_DONE_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001172 netif_dbg(efx, hw, efx->net_dev,
1173 "channel %d EVQ %d initialised\n",
1174 channel->channel, ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001175 break;
1176 case FSE_AZ_SRM_UPD_DONE_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001177 netif_vdbg(efx, hw, efx->net_dev,
1178 "channel %d SRAM update done\n", channel->channel);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001179 break;
1180 case FSE_AZ_WAKE_UP_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001181 netif_vdbg(efx, hw, efx->net_dev,
1182 "channel %d RXQ %d wakeup event\n",
1183 channel->channel, ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001184 break;
1185 case FSE_AZ_TIMER_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001186 netif_vdbg(efx, hw, efx->net_dev,
1187 "channel %d RX queue %d timer expired\n",
1188 channel->channel, ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001189 break;
1190 case FSE_AA_RX_RECOVER_EV:
Ben Hutchings62776d02010-06-23 11:30:07 +00001191 netif_err(efx, rx_err, efx->net_dev,
1192 "channel %d seen DRIVER RX_RESET event. "
Ben Hutchings8e730c12009-11-29 15:14:45 +00001193 "Resetting.\n", channel->channel);
1194 atomic_inc(&efx->rx_reset);
1195 efx_schedule_reset(efx,
1196 EFX_WORKAROUND_6555(efx) ?
1197 RESET_TYPE_RX_RECOVERY :
1198 RESET_TYPE_DISABLE);
1199 break;
1200 case FSE_BZ_RX_DSC_ERROR_EV:
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001201 if (ev_sub_data < EFX_VI_BASE) {
1202 netif_err(efx, rx_err, efx->net_dev,
1203 "RX DMA Q %d reports descriptor fetch error."
1204 " RX Q %d is disabled.\n", ev_sub_data,
1205 ev_sub_data);
1206 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1207 } else
1208 efx_sriov_desc_fetch_err(efx, ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001209 break;
1210 case FSE_BZ_TX_DSC_ERROR_EV:
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001211 if (ev_sub_data < EFX_VI_BASE) {
1212 netif_err(efx, tx_err, efx->net_dev,
1213 "TX DMA Q %d reports descriptor fetch error."
1214 " TX Q %d is disabled.\n", ev_sub_data,
1215 ev_sub_data);
1216 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1217 } else
1218 efx_sriov_desc_fetch_err(efx, ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001219 break;
1220 default:
Ben Hutchings62776d02010-06-23 11:30:07 +00001221 netif_vdbg(efx, hw, efx->net_dev,
1222 "channel %d unknown driver event code %d "
1223 "data %04x\n", channel->channel, ev_sub_code,
1224 ev_sub_data);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001225 break;
1226 }
1227}
1228
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001229int efx_nic_process_eventq(struct efx_channel *channel, int budget)
Ben Hutchings8e730c12009-11-29 15:14:45 +00001230{
Steve Hodgsonecc910f2010-09-10 06:42:22 +00001231 struct efx_nic *efx = channel->efx;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001232 unsigned int read_ptr;
1233 efx_qword_t event, *p_event;
1234 int ev_code;
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001235 int tx_packets = 0;
1236 int spent = 0;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001237
1238 read_ptr = channel->eventq_read_ptr;
1239
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001240 for (;;) {
Ben Hutchings8e730c12009-11-29 15:14:45 +00001241 p_event = efx_event(channel, read_ptr);
1242 event = *p_event;
1243
1244 if (!efx_event_present(&event))
1245 /* End of events */
1246 break;
1247
Ben Hutchings62776d02010-06-23 11:30:07 +00001248 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1249 "channel %d event is "EFX_QWORD_FMT"\n",
1250 channel->channel, EFX_QWORD_VAL(event));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001251
1252 /* Clear this event by marking it all ones */
1253 EFX_SET_QWORD(*p_event);
1254
Ben Hutchingsd4fabcc2011-04-04 14:22:11 +01001255 ++read_ptr;
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001256
Ben Hutchings8e730c12009-11-29 15:14:45 +00001257 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1258
1259 switch (ev_code) {
1260 case FSE_AZ_EV_CODE_RX_EV:
1261 efx_handle_rx_event(channel, &event);
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001262 if (++spent == budget)
1263 goto out;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001264 break;
1265 case FSE_AZ_EV_CODE_TX_EV:
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001266 tx_packets += efx_handle_tx_event(channel, &event);
Steve Hodgsonecc910f2010-09-10 06:42:22 +00001267 if (tx_packets > efx->txq_entries) {
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001268 spent = budget;
1269 goto out;
1270 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001271 break;
1272 case FSE_AZ_EV_CODE_DRV_GEN_EV:
Steve Hodgson90d683a2010-06-01 11:19:39 +00001273 efx_handle_generated_event(channel, &event);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001274 break;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001275 case FSE_AZ_EV_CODE_DRIVER_EV:
1276 efx_handle_driver_event(channel, &event);
1277 break;
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001278 case FSE_CZ_EV_CODE_USER_EV:
1279 efx_sriov_event(channel, &event);
1280 break;
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001281 case FSE_CZ_EV_CODE_MCDI_EV:
1282 efx_mcdi_process_event(channel, &event);
1283 break;
Ben Hutchings40641ed2010-12-02 13:47:45 +00001284 case FSE_AZ_EV_CODE_GLOBAL_EV:
1285 if (efx->type->handle_global_event &&
1286 efx->type->handle_global_event(channel, &event))
1287 break;
1288 /* else fall through */
Ben Hutchings8e730c12009-11-29 15:14:45 +00001289 default:
Ben Hutchings62776d02010-06-23 11:30:07 +00001290 netif_err(channel->efx, hw, channel->efx->net_dev,
1291 "channel %d unknown event type %d (data "
1292 EFX_QWORD_FMT ")\n", channel->channel,
1293 ev_code, EFX_QWORD_VAL(event));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001294 }
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001295 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001296
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001297out:
Ben Hutchings8e730c12009-11-29 15:14:45 +00001298 channel->eventq_read_ptr = read_ptr;
Ben Hutchingsfa236e12010-04-28 09:29:42 +00001299 return spent;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001300}
1301
Ben Hutchingsd4fabcc2011-04-04 14:22:11 +01001302/* Check whether an event is present in the eventq at the current
1303 * read pointer. Only useful for self-test.
1304 */
1305bool efx_nic_event_present(struct efx_channel *channel)
1306{
1307 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1308}
Ben Hutchings8e730c12009-11-29 15:14:45 +00001309
1310/* Allocate buffer table entries for event queue */
1311int efx_nic_probe_eventq(struct efx_channel *channel)
1312{
1313 struct efx_nic *efx = channel->efx;
Steve Hodgsonecc910f2010-09-10 06:42:22 +00001314 unsigned entries;
1315
1316 entries = channel->eventq_mask + 1;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001317 return efx_alloc_special_buffer(efx, &channel->eventq,
Steve Hodgsonecc910f2010-09-10 06:42:22 +00001318 entries * sizeof(efx_qword_t));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001319}
1320
1321void efx_nic_init_eventq(struct efx_channel *channel)
1322{
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001323 efx_oword_t reg;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001324 struct efx_nic *efx = channel->efx;
1325
Ben Hutchings62776d02010-06-23 11:30:07 +00001326 netif_dbg(efx, hw, efx->net_dev,
1327 "channel %d event queue in special buffers %d-%d\n",
1328 channel->channel, channel->eventq.index,
1329 channel->eventq.index + channel->eventq.entries - 1);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001330
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001331 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1332 EFX_POPULATE_OWORD_3(reg,
1333 FRF_CZ_TIMER_Q_EN, 1,
1334 FRF_CZ_HOST_NOTIFY_MODE, 0,
1335 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1336 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1337 }
1338
Ben Hutchings8e730c12009-11-29 15:14:45 +00001339 /* Pin event queue buffer */
1340 efx_init_special_buffer(efx, &channel->eventq);
1341
1342 /* Fill event queue with all ones (i.e. empty events) */
1343 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1344
1345 /* Push event queue to card */
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001346 EFX_POPULATE_OWORD_3(reg,
Ben Hutchings8e730c12009-11-29 15:14:45 +00001347 FRF_AZ_EVQ_EN, 1,
1348 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1349 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001350 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
Ben Hutchings8e730c12009-11-29 15:14:45 +00001351 channel->channel);
1352
1353 efx->type->push_irq_moderation(channel);
1354}
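/* The 0xff fill above is what makes an empty slot detectable: no
 * valid hardware event has all bits set in both of its dwords.  A
 * minimal sketch of the presence test that relies on this convention
 * (the driver's real helper lives in a header; this is illustrative):
 */
static inline bool efx_example_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}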
1355
1356void efx_nic_fini_eventq(struct efx_channel *channel)
1357{
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001358 efx_oword_t reg;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001359 struct efx_nic *efx = channel->efx;
1360
1361 /* Remove event queue from card */
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001362 EFX_ZERO_OWORD(reg);
1363 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
Ben Hutchings8e730c12009-11-29 15:14:45 +00001364 channel->channel);
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001365 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1366 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001367
1368 /* Unpin event queue */
1369 efx_fini_special_buffer(efx, &channel->eventq);
1370}
1371
1372/* Free buffers backing event queue */
1373void efx_nic_remove_eventq(struct efx_channel *channel)
1374{
1375 efx_free_special_buffer(channel->efx, &channel->eventq);
1376}
1377
1378
Ben Hutchingseee6f6a2012-02-28 23:37:35 +00001379void efx_nic_event_test_start(struct efx_channel *channel)
Ben Hutchings8e730c12009-11-29 15:14:45 +00001380{
Ben Hutchingsdd407812012-02-28 23:40:21 +00001381 channel->event_test_cpu = -1;
Ben Hutchingseee6f6a2012-02-28 23:37:35 +00001382 smp_wmb();
Ben Hutchings4ef594e2012-02-07 23:39:18 +00001383 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
Steve Hodgson90d683a2010-06-01 11:19:39 +00001384}
1385
Ben Hutchings2ae75da2012-02-07 23:49:52 +00001386void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
Steve Hodgson90d683a2010-06-01 11:19:39 +00001387{
Ben Hutchings2ae75da2012-02-07 23:49:52 +00001388 efx_magic_event(efx_rx_queue_channel(rx_queue),
1389 EFX_CHANNEL_MAGIC_FILL(rx_queue));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001390}
1391
1392/**************************************************************************
1393 *
Ben Hutchings8e730c12009-11-29 15:14:45 +00001394 * Hardware interrupts
1395 * The hardware interrupt handler does very little work; all the event
1396 * queue processing is carried out by per-channel tasklets.
1397 *
1398 **************************************************************************/
1399
1400/* Enable/disable/generate interrupts */
1401static inline void efx_nic_interrupts(struct efx_nic *efx,
1402 bool enabled, bool force)
1403{
1404 efx_oword_t int_en_reg_ker;
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001405
1406 EFX_POPULATE_OWORD_3(int_en_reg_ker,
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001407 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
Ben Hutchings8e730c12009-11-29 15:14:45 +00001408 FRF_AZ_KER_INT_KER, force,
1409 FRF_AZ_DRV_INT_EN_KER, enabled);
1410 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1411}
1412
1413void efx_nic_enable_interrupts(struct efx_nic *efx)
1414{
Ben Hutchings8e730c12009-11-29 15:14:45 +00001415 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1416 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1417
Ben Hutchings8e730c12009-11-29 15:14:45 +00001418 efx_nic_interrupts(efx, true, false);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001419}
1420
1421void efx_nic_disable_interrupts(struct efx_nic *efx)
1422{
1423 /* Disable interrupts */
1424 efx_nic_interrupts(efx, false, false);
1425}
1426
1427/* Generate a test interrupt
1428 * Interrupts must already have been enabled, otherwise nasty things
1429 * may happen.
1430 */
Ben Hutchingseee6f6a2012-02-28 23:37:35 +00001431void efx_nic_irq_test_start(struct efx_nic *efx)
Ben Hutchings8e730c12009-11-29 15:14:45 +00001432{
Ben Hutchingseee6f6a2012-02-28 23:37:35 +00001433 efx->last_irq_cpu = -1;
1434 smp_wmb();
Ben Hutchings8e730c12009-11-29 15:14:45 +00001435 efx_nic_interrupts(efx, true, true);
1436}
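/* A minimal sketch of the self-test side that consumes this: poll for
 * the IRQ handler having overwritten the -1 sentinel written before
 * the test interrupt was raised (the helper name is illustrative, not
 * part of the driver):
 */
static bool efx_example_irq_test_seen(struct efx_nic *efx)
{
	return ACCESS_ONCE(efx->last_irq_cpu) >= 0;
}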
1437
1438/* Process a fatal interrupt
1439 * Disable bus mastering ASAP and schedule a reset
1440 */
1441irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1442{
1443 struct falcon_nic_data *nic_data = efx->nic_data;
1444 efx_oword_t *int_ker = efx->irq_status.addr;
1445 efx_oword_t fatal_intr;
1446 int error, mem_perr;
1447
1448 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1449 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1450
Ben Hutchings62776d02010-06-23 11:30:07 +00001451 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1452 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1453 EFX_OWORD_VAL(fatal_intr),
1454 error ? "disabling bus mastering" : "no recognised error");
Ben Hutchings8e730c12009-11-29 15:14:45 +00001455
1456 /* If this is a memory parity error, dump which blocks are offending */
Steve Hodgson97e1eaa2010-04-28 09:28:52 +00001457 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1458 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001459 if (mem_perr) {
1460 efx_oword_t reg;
1461 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
Ben Hutchings62776d02010-06-23 11:30:07 +00001462 netif_err(efx, hw, efx->net_dev,
1463 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1464 EFX_OWORD_VAL(reg));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001465 }
1466
1467 /* Disable both devices */
1468 pci_clear_master(efx->pci_dev);
1469 if (efx_nic_is_dual_func(efx))
1470 pci_clear_master(nic_data->pci_dev2);
1471 efx_nic_disable_interrupts(efx);
1472
1473 /* Count errors and reset or disable the NIC accordingly */
1474 if (efx->int_error_count == 0 ||
1475 time_after(jiffies, efx->int_error_expire)) {
1476 efx->int_error_count = 0;
1477 efx->int_error_expire =
1478 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1479 }
1480 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
Ben Hutchings62776d02010-06-23 11:30:07 +00001481 netif_err(efx, hw, efx->net_dev,
1482 "SYSTEM ERROR - reset scheduled\n");
Ben Hutchings8e730c12009-11-29 15:14:45 +00001483 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1484 } else {
Ben Hutchings62776d02010-06-23 11:30:07 +00001485 netif_err(efx, hw, efx->net_dev,
1486 "SYSTEM ERROR - max number of errors seen."
1487 "NIC will be disabled\n");
Ben Hutchings8e730c12009-11-29 15:14:45 +00001488 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1489 }
Steve Hodgson63695452010-04-28 09:27:36 +00001490
Ben Hutchings8e730c12009-11-29 15:14:45 +00001491 return IRQ_HANDLED;
1492}
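/* Worked example of the rate limiting above: the first fatal error
 * (or the first one after a quiet spell) restarts the expiry window
 * of EFX_INT_ERROR_EXPIRE seconds; each further error inside that
 * window bumps int_error_count, and only once the count reaches
 * EFX_MAX_INT_ERRORS do we give up and disable the NIC rather than
 * schedule yet another reset.
 */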
1493
1494/* Handle a legacy interrupt
1495 * Acknowledges the interrupt and schedules event queue processing.
1496 */
1497static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1498{
1499 struct efx_nic *efx = dev_id;
1500 efx_oword_t *int_ker = efx->irq_status.addr;
1501 irqreturn_t result = IRQ_NONE;
1502 struct efx_channel *channel;
1503 efx_dword_t reg;
1504 u32 queues;
1505 int syserr;
1506
Ben Hutchings94dec6a2010-12-07 19:24:45 +00001507 /* Could this be ours? If interrupts are disabled then the
1508 * channel state may not be valid.
1509 */
1510 if (!efx->legacy_irq_enabled)
1511 return result;
1512
Ben Hutchings8e730c12009-11-29 15:14:45 +00001513 /* Read the ISR which also ACKs the interrupts */
1514 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1515 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1516
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001517 /* Handle non-event-queue sources */
1518 if (queues & (1U << efx->irq_level)) {
Steve Hodgson63695452010-04-28 09:27:36 +00001519 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1520 if (unlikely(syserr))
1521 return efx_nic_fatal_interrupt(efx);
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001522 efx->last_irq_cpu = raw_smp_processor_id();
Steve Hodgson63695452010-04-28 09:27:36 +00001523 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001524
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001525 if (queues != 0) {
1526 if (EFX_WORKAROUND_15783(efx))
1527 efx->irq_zero_count = 0;
1528
1529 /* Schedule processing of any interrupting queues */
1530 efx_for_each_channel(channel, efx) {
1531 if (queues & 1)
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001532 efx_schedule_channel_irq(channel);
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001533 queues >>= 1;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001534 }
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001535 result = IRQ_HANDLED;
1536
Steve Hodgson41b7e4c2010-04-28 09:28:27 +00001537 } else if (EFX_WORKAROUND_15783(efx)) {
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001538 efx_qword_t *event;
1539
Steve Hodgson41b7e4c2010-04-28 09:28:27 +00001540 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1541 * because this might be a shared interrupt. */
1542 if (efx->irq_zero_count++ == 0)
1543 result = IRQ_HANDLED;
1544
1545 /* Ensure we schedule or rearm all event queues */
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001546 efx_for_each_channel(channel, efx) {
1547 event = efx_event(channel, channel->eventq_read_ptr);
1548 if (efx_event_present(event))
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001549 efx_schedule_channel_irq(channel);
Steve Hodgson41b7e4c2010-04-28 09:28:27 +00001550 else
1551 efx_nic_eventq_read_ack(channel);
Ben Hutchings8880f4e2009-11-29 15:15:41 +00001552 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001553 }
1554
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001555 if (result == IRQ_HANDLED)
Ben Hutchings62776d02010-06-23 11:30:07 +00001556 netif_vdbg(efx, intr, efx->net_dev,
1557 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1558 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001559
1560 return result;
1561}
1562
1563/* Handle an MSI interrupt
1564 *
1565 * Handle an MSI hardware interrupt. This routine schedules event
1566 * queue processing. No interrupt acknowledgement cycle is necessary.
1567 * Also, we never need to check that the interrupt is for us, since
1568 * MSI interrupts cannot be shared.
1569 */
1570static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1571{
Ben Hutchings46426102010-09-10 06:42:33 +00001572 struct efx_channel *channel = *(struct efx_channel **)dev_id;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001573 struct efx_nic *efx = channel->efx;
1574 efx_oword_t *int_ker = efx->irq_status.addr;
1575 int syserr;
1576
Ben Hutchings62776d02010-06-23 11:30:07 +00001577 netif_vdbg(efx, intr, efx->net_dev,
1578 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1579 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
Ben Hutchings8e730c12009-11-29 15:14:45 +00001580
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001581 /* Handle non-event-queue sources */
1582 if (channel->channel == efx->irq_level) {
Steve Hodgson63695452010-04-28 09:27:36 +00001583 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1584 if (unlikely(syserr))
1585 return efx_nic_fatal_interrupt(efx);
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001586 efx->last_irq_cpu = raw_smp_processor_id();
Steve Hodgson63695452010-04-28 09:27:36 +00001587 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001588
1589 /* Schedule processing of the channel */
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001590 efx_schedule_channel_irq(channel);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001591
1592 return IRQ_HANDLED;
1593}
1594
1595
1596/* Set up the RSS indirection table.
1597 * This maps from the hash value of the packet to an RX queue.
1598 */
Ben Hutchings765c9f42010-06-30 05:06:28 +00001599void efx_nic_push_rx_indir_table(struct efx_nic *efx)
Ben Hutchings8e730c12009-11-29 15:14:45 +00001600{
Ben Hutchings765c9f42010-06-30 05:06:28 +00001601 size_t i = 0;
Ben Hutchings8e730c12009-11-29 15:14:45 +00001602 efx_dword_t dword;
1603
1604 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1605 return;
1606
Ben Hutchings765c9f42010-06-30 05:06:28 +00001607 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1608 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1609
1610 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
Ben Hutchings8e730c12009-11-29 15:14:45 +00001611 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
Ben Hutchings765c9f42010-06-30 05:06:28 +00001612 efx->rx_indir_table[i]);
1613 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001614 }
1615}
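/* A minimal sketch of a caller, assuming a simple round-robin default
 * spread of hash buckets across the RX channels (illustrative only;
 * the real default population lives elsewhere in the driver):
 */
static void efx_example_default_rx_indir(struct efx_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;
	efx_nic_push_rx_indir_table(efx);
}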
1616
1617/* Hook interrupt handler(s)
1618 * Try MSI and then legacy interrupts.
1619 */
1620int efx_nic_init_interrupt(struct efx_nic *efx)
1621{
1622 struct efx_channel *channel;
1623 int rc;
1624
1625 if (!EFX_INT_MODE_USE_MSI(efx)) {
1626 irq_handler_t handler;
1627 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1628 handler = efx_legacy_interrupt;
1629 else
1630 handler = falcon_legacy_interrupt_a1;
1631
1632 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1633 efx->name, efx);
1634 if (rc) {
Ben Hutchings62776d02010-06-23 11:30:07 +00001635 netif_err(efx, drv, efx->net_dev,
1636 "failed to hook legacy IRQ %d\n",
1637 efx->pci_dev->irq);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001638 goto fail1;
1639 }
1640 return 0;
1641 }
1642
1643 /* Hook MSI or MSI-X interrupt */
1644 efx_for_each_channel(channel, efx) {
1645 rc = request_irq(channel->irq, efx_msi_interrupt,
1646 IRQF_PROBE_SHARED, /* Not shared */
Ben Hutchings46426102010-09-10 06:42:33 +00001647 efx->channel_name[channel->channel],
1648 &efx->channel[channel->channel]);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001649 if (rc) {
Ben Hutchings62776d02010-06-23 11:30:07 +00001650 netif_err(efx, drv, efx->net_dev,
1651 "failed to hook IRQ %d\n", channel->irq);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001652 goto fail2;
1653 }
1654 }
1655
1656 return 0;
1657
1658 fail2:
1659 efx_for_each_channel(channel, efx)
Ben Hutchings46426102010-09-10 06:42:33 +00001660 free_irq(channel->irq, &efx->channel[channel->channel]);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001661 fail1:
1662 return rc;
1663}
1664
1665void efx_nic_fini_interrupt(struct efx_nic *efx)
1666{
1667 struct efx_channel *channel;
1668 efx_oword_t reg;
1669
1670 /* Disable MSI/MSI-X interrupts */
1671 efx_for_each_channel(channel, efx) {
1672 if (channel->irq)
Ben Hutchings46426102010-09-10 06:42:33 +00001673 free_irq(channel->irq, &efx->channel[channel->channel]);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001674 }
1675
1676 /* ACK legacy interrupt */
1677 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1678 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1679 else
1680 falcon_irq_ack_a1(efx);
1681
1682 /* Disable legacy interrupt */
1683 if (efx->legacy_irq)
1684 free_irq(efx->legacy_irq, efx);
1685}
1686
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001687/* Looks at available SRAM resources and works out how many queues we
1688 * can support, and where things like descriptor caches should live.
1689 *
1690 * SRAM is split up as follows:
1691 * 0 buftbl entries for channels
1692 * efx->vf_buftbl_base buftbl entries for SR-IOV
1693 * efx->rx_dc_base RX descriptor caches
1694 * efx->tx_dc_base TX descriptor caches
1695 */
Ben Hutchings28e47c42012-02-15 01:58:49 +00001696void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1697{
1698 unsigned vi_count, buftbl_min;
1699
1700 /* Account for the buffer table entries backing the datapath channels
1701 * and the descriptor caches for those channels.
1702 */
1703 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1704 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1705 efx->n_channels * EFX_MAX_EVQ_SIZE)
1706 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1707 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1708
Ben Hutchingscd2d5b52012-02-14 00:48:07 +00001709#ifdef CONFIG_SFC_SRIOV
1710 if (efx_sriov_wanted(efx)) {
1711 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1712
1713 efx->vf_buftbl_base = buftbl_min;
1714
1715 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1716 vi_count = max(vi_count, EFX_VI_BASE);
1717 buftbl_free = (sram_lim_qw - buftbl_min -
1718 vi_count * vi_dc_entries);
1719
1720 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1721 efx_vf_size(efx));
1722 vf_limit = min(buftbl_free / entries_per_vf,
1723 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1724
1725 if (efx->vf_count > vf_limit) {
1726 netif_err(efx, probe, efx->net_dev,
1727 "Reducing VF count from from %d to %d\n",
1728 efx->vf_count, vf_limit);
1729 efx->vf_count = vf_limit;
1730 }
1731 vi_count += efx->vf_count * efx_vf_size(efx);
1732 }
1733#endif
1734
Ben Hutchings28e47c42012-02-15 01:58:49 +00001735 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1736 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1737}
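/* Worked example of buftbl_min, using illustrative numbers: a
 * 4096-entry DMA queue of 8-byte (efx_qword_t) descriptors occupies
 * 32768 bytes, i.e. 32768 / EFX_BUF_SIZE buffer table entries; with a
 * 4096-byte EFX_BUF_SIZE that is 8 entries per queue.  buftbl_min is
 * just that figure summed over every RX, TX and event queue backing
 * the datapath channels.
 */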
1738
Ben Hutchings8e730c12009-11-29 15:14:45 +00001739u32 efx_nic_fpga_ver(struct efx_nic *efx)
1740{
1741 efx_oword_t altera_build;
1742 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1743 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1744}
1745
1746void efx_nic_init_common(struct efx_nic *efx)
1747{
1748 efx_oword_t temp;
1749
1750 /* Set positions of descriptor caches in SRAM. */
Ben Hutchings28e47c42012-02-15 01:58:49 +00001751 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001752 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
Ben Hutchings28e47c42012-02-15 01:58:49 +00001753 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001754 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1755
1756 /* Set TX descriptor cache size. */
1757 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1758 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1759 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1760
1761 /* Set RX descriptor cache size. Set low watermark to size-8, as
1762 * this allows most efficient prefetching.
1763 */
1764 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1765 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1766 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1767 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1768 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1769
1770 /* Program INT_KER address */
1771 EFX_POPULATE_OWORD_2(temp,
1772 FRF_AZ_NORM_INT_VEC_DIS_KER,
1773 EFX_INT_MODE_USE_MSI(efx),
1774 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1775 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1776
Steve Hodgson63695452010-04-28 09:27:36 +00001777 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1778 /* Use an interrupt level unused by event queues */
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001779 efx->irq_level = 0x1f;
Steve Hodgson63695452010-04-28 09:27:36 +00001780 else
1781 /* Use a valid MSI-X vector */
Ben Hutchings1646a6f2012-01-05 20:14:10 +00001782 efx->irq_level = 0;
Steve Hodgson63695452010-04-28 09:27:36 +00001783
Ben Hutchings8e730c12009-11-29 15:14:45 +00001784 /* Enable all the genuinely fatal interrupts. (They are still
1785 * masked by the overall interrupt mask, controlled by
1786 * falcon_interrupts()).
1787 *
1788 * Note: All other fatal interrupts are enabled
1789 */
1790 EFX_POPULATE_OWORD_3(temp,
1791 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1792 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1793 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
Steve Hodgsonb17424b2010-04-28 09:25:22 +00001794 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1795 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001796 EFX_INVERT_OWORD(temp);
1797 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1798
Ben Hutchings765c9f42010-06-30 05:06:28 +00001799 efx_nic_push_rx_indir_table(efx);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001800
1801 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1802 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1803 */
1804 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1805 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1806 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1807 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
Ben Hutchingscd385572010-11-15 23:53:11 +00001808 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001809 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1810 /* Enable SW_EV to inherit in char driver - assume harmless here */
1811 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1812 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1813 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
Ben Hutchings286d47b2009-12-23 13:49:13 +00001814 /* Disable hardware watchdog which can misfire */
1815 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
Ben Hutchings8e730c12009-11-29 15:14:45 +00001816 /* Squash TX of packets of 16 bytes or less */
1817 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1818 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1819 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
Ben Hutchings94b274b2011-01-10 21:18:20 +00001820
1821 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1822 EFX_POPULATE_OWORD_4(temp,
1823 /* Default values */
1824 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1825 FRF_BZ_TX_PACE_SB_AF, 0xb,
1826 FRF_BZ_TX_PACE_FB_BASE, 0,
1827 /* Allow large pace values in the
1828 * fast bin. */
1829 FRF_BZ_TX_PACE_BIN_TH,
1830 FFE_BZ_TX_PACE_RESERVED);
1831 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1832 }
Ben Hutchings8e730c12009-11-29 15:14:45 +00001833}
Ben Hutchings5b98c1b2010-06-21 03:06:53 +00001834
1835/* Register dump */
1836
1837#define REGISTER_REVISION_A 1
1838#define REGISTER_REVISION_B 2
1839#define REGISTER_REVISION_C 3
1840#define REGISTER_REVISION_Z 3 /* latest revision */
1841
1842struct efx_nic_reg {
1843 u32 offset:24;
1844 u32 min_revision:2, max_revision:2;
1845};
1846
1847#define REGISTER(name, min_rev, max_rev) { \
1848 FR_ ## min_rev ## max_rev ## _ ## name, \
1849 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1850}
1851#define REGISTER_AA(name) REGISTER(name, A, A)
1852#define REGISTER_AB(name) REGISTER(name, A, B)
1853#define REGISTER_AZ(name) REGISTER(name, A, Z)
1854#define REGISTER_BB(name) REGISTER(name, B, B)
1855#define REGISTER_BZ(name) REGISTER(name, B, Z)
1856#define REGISTER_CZ(name) REGISTER(name, C, Z)
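/* For example, REGISTER_BZ(DP_CTRL) expands through REGISTER() to
 *	{ FR_BZ_DP_CTRL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 * i.e. DP_CTRL is dumped on revision B and later NICs only.
 */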
1857
1858static const struct efx_nic_reg efx_nic_regs[] = {
1859 REGISTER_AZ(ADR_REGION),
1860 REGISTER_AZ(INT_EN_KER),
1861 REGISTER_BZ(INT_EN_CHAR),
1862 REGISTER_AZ(INT_ADR_KER),
1863 REGISTER_BZ(INT_ADR_CHAR),
1864 /* INT_ACK_KER is WO */
1865 /* INT_ISR0 is RC */
1866 REGISTER_AZ(HW_INIT),
1867 REGISTER_CZ(USR_EV_CFG),
1868 REGISTER_AB(EE_SPI_HCMD),
1869 REGISTER_AB(EE_SPI_HADR),
1870 REGISTER_AB(EE_SPI_HDATA),
1871 REGISTER_AB(EE_BASE_PAGE),
1872 REGISTER_AB(EE_VPD_CFG0),
1873 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1874 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1875 /* PCIE_CORE_INDIRECT is indirect */
1876 REGISTER_AB(NIC_STAT),
1877 REGISTER_AB(GPIO_CTL),
1878 REGISTER_AB(GLB_CTL),
1879 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1880 REGISTER_BZ(DP_CTRL),
1881 REGISTER_AZ(MEM_STAT),
1882 REGISTER_AZ(CS_DEBUG),
1883 REGISTER_AZ(ALTERA_BUILD),
1884 REGISTER_AZ(CSR_SPARE),
1885 REGISTER_AB(PCIE_SD_CTL0123),
1886 REGISTER_AB(PCIE_SD_CTL45),
1887 REGISTER_AB(PCIE_PCS_CTL_STAT),
1888 /* DEBUG_DATA_OUT is not used */
1889 /* DRV_EV is WO */
1890 REGISTER_AZ(EVQ_CTL),
1891 REGISTER_AZ(EVQ_CNT1),
1892 REGISTER_AZ(EVQ_CNT2),
1893 REGISTER_AZ(BUF_TBL_CFG),
1894 REGISTER_AZ(SRM_RX_DC_CFG),
1895 REGISTER_AZ(SRM_TX_DC_CFG),
1896 REGISTER_AZ(SRM_CFG),
1897 /* BUF_TBL_UPD is WO */
1898 REGISTER_AZ(SRM_UPD_EVQ),
1899 REGISTER_AZ(SRAM_PARITY),
1900 REGISTER_AZ(RX_CFG),
1901 REGISTER_BZ(RX_FILTER_CTL),
1902 /* RX_FLUSH_DESCQ is WO */
1903 REGISTER_AZ(RX_DC_CFG),
1904 REGISTER_AZ(RX_DC_PF_WM),
1905 REGISTER_BZ(RX_RSS_TKEY),
1906 /* RX_NODESC_DROP is RC */
1907 REGISTER_AA(RX_SELF_RST),
1908 /* RX_DEBUG, RX_PUSH_DROP are not used */
1909 REGISTER_CZ(RX_RSS_IPV6_REG1),
1910 REGISTER_CZ(RX_RSS_IPV6_REG2),
1911 REGISTER_CZ(RX_RSS_IPV6_REG3),
1912 /* TX_FLUSH_DESCQ is WO */
1913 REGISTER_AZ(TX_DC_CFG),
1914 REGISTER_AA(TX_CHKSM_CFG),
1915 REGISTER_AZ(TX_CFG),
1916 /* TX_PUSH_DROP is not used */
1917 REGISTER_AZ(TX_RESERVED),
1918 REGISTER_BZ(TX_PACE),
1919 /* TX_PACE_DROP_QID is RC */
1920 REGISTER_BB(TX_VLAN),
1921 REGISTER_BZ(TX_IPFIL_PORTEN),
1922 REGISTER_AB(MD_TXD),
1923 REGISTER_AB(MD_RXD),
1924 REGISTER_AB(MD_CS),
1925 REGISTER_AB(MD_PHY_ADR),
1926 REGISTER_AB(MD_ID),
1927 /* MD_STAT is RC */
1928 REGISTER_AB(MAC_STAT_DMA),
1929 REGISTER_AB(MAC_CTRL),
1930 REGISTER_BB(GEN_MODE),
1931 REGISTER_AB(MAC_MC_HASH_REG0),
1932 REGISTER_AB(MAC_MC_HASH_REG1),
1933 REGISTER_AB(GM_CFG1),
1934 REGISTER_AB(GM_CFG2),
1935 /* GM_IPG and GM_HD are not used */
1936 REGISTER_AB(GM_MAX_FLEN),
1937 /* GM_TEST is not used */
1938 REGISTER_AB(GM_ADR1),
1939 REGISTER_AB(GM_ADR2),
1940 REGISTER_AB(GMF_CFG0),
1941 REGISTER_AB(GMF_CFG1),
1942 REGISTER_AB(GMF_CFG2),
1943 REGISTER_AB(GMF_CFG3),
1944 REGISTER_AB(GMF_CFG4),
1945 REGISTER_AB(GMF_CFG5),
1946 REGISTER_BB(TX_SRC_MAC_CTL),
1947 REGISTER_AB(XM_ADR_LO),
1948 REGISTER_AB(XM_ADR_HI),
1949 REGISTER_AB(XM_GLB_CFG),
1950 REGISTER_AB(XM_TX_CFG),
1951 REGISTER_AB(XM_RX_CFG),
1952 REGISTER_AB(XM_MGT_INT_MASK),
1953 REGISTER_AB(XM_FC),
1954 REGISTER_AB(XM_PAUSE_TIME),
1955 REGISTER_AB(XM_TX_PARAM),
1956 REGISTER_AB(XM_RX_PARAM),
1957 /* XM_MGT_INT_MSK (note no 'A') is RC */
1958 REGISTER_AB(XX_PWR_RST),
1959 REGISTER_AB(XX_SD_CTL),
1960 REGISTER_AB(XX_TXDRV_CTL),
1961 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1962 /* XX_CORE_STAT is partly RC */
1963};
1964
1965struct efx_nic_reg_table {
1966 u32 offset:24;
1967 u32 min_revision:2, max_revision:2;
1968 u32 step:6, rows:21;
1969};
1970
1971#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1972 offset, \
1973 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1974 step, rows \
1975}
Ben Hutchings9c636ba2012-01-05 17:19:45 +00001976#define REGISTER_TABLE(name, min_rev, max_rev) \
Ben Hutchings5b98c1b2010-06-21 03:06:53 +00001977 REGISTER_TABLE_DIMENSIONS( \
1978 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1979 min_rev, max_rev, \
1980 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1981 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1982#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1983#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1984#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1985#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1986#define REGISTER_TABLE_BB_CZ(name) \
1987 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1988 FR_BZ_ ## name ## _STEP, \
1989 FR_BB_ ## name ## _ROWS), \
1990 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1991 FR_BZ_ ## name ## _STEP, \
1992 FR_CZ_ ## name ## _ROWS)
1993#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
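/* For example, REGISTER_TABLE_BB_CZ(TIMER_TBL) emits two table
 * entries sharing the FR_BZ_ offset and step but with per-revision
 * row counts:
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 *	  FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 *	  FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }
 */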
1994
1995static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1996 /* DRIVER is not used */
1997 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1998 REGISTER_TABLE_BB(TX_IPFIL_TBL),
1999 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
2000 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
2001 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
2002 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
2003 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
2004 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
2005 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
Ben Hutchings75abc512010-09-20 08:43:53 +00002006 /* We can't reasonably read all of the buffer table (up to 8MB!).
Ben Hutchings5b98c1b2010-06-21 03:06:53 +00002007 * However this driver will only use a few entries. Reading
2008 * 1K entries allows for some expansion of queue count and
2009 * size before we need to change the version. */
2010 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
2011 A, A, 8, 1024),
2012 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
2013 B, Z, 8, 1024),
Ben Hutchings5b98c1b2010-06-21 03:06:53 +00002014 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
2015 REGISTER_TABLE_BB_CZ(TIMER_TBL),
2016 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
2017 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
2018 /* TX_FILTER_TBL0 is huge and not used by this driver */
2019 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
2020 REGISTER_TABLE_CZ(MC_TREG_SMEM),
2021 /* MSIX_PBA_TABLE is not mapped */
2022 /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
Ben Hutchings75abc512010-09-20 08:43:53 +00002023 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
Ben Hutchings5b98c1b2010-06-21 03:06:53 +00002024};
2025
2026size_t efx_nic_get_regs_len(struct efx_nic *efx)
2027{
2028 const struct efx_nic_reg *reg;
2029 const struct efx_nic_reg_table *table;
2030 size_t len = 0;
2031
2032 for (reg = efx_nic_regs;
2033 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2034 reg++)
2035 if (efx->type->revision >= reg->min_revision &&
2036 efx->type->revision <= reg->max_revision)
2037 len += sizeof(efx_oword_t);
2038
2039 for (table = efx_nic_reg_tables;
2040 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2041 table++)
2042 if (efx->type->revision >= table->min_revision &&
2043 efx->type->revision <= table->max_revision)
2044 len += table->rows * min_t(size_t, table->step, 16);
2045
2046 return len;
2047}
2048
2049void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2050{
2051 const struct efx_nic_reg *reg;
2052 const struct efx_nic_reg_table *table;
2053
2054 for (reg = efx_nic_regs;
2055 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2056 reg++) {
2057 if (efx->type->revision >= reg->min_revision &&
2058 efx->type->revision <= reg->max_revision) {
2059 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
2060 buf += sizeof(efx_oword_t);
2061 }
2062 }
2063
2064 for (table = efx_nic_reg_tables;
2065 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2066 table++) {
2067 size_t size, i;
2068
2069 if (!(efx->type->revision >= table->min_revision &&
2070 efx->type->revision <= table->max_revision))
2071 continue;
2072
2073 size = min_t(size_t, table->step, 16);
2074
2075 for (i = 0; i < table->rows; i++) {
2076 switch (table->step) {
2077 case 4: /* 32-bit register or SRAM */
2078 efx_readd_table(efx, buf, table->offset, i);
2079 break;
2080 case 8: /* 64-bit SRAM */
2081 efx_sram_readq(efx,
2082 efx->membase + table->offset,
2083 buf, i);
2084 break;
2085 case 16: /* 128-bit register */
2086 efx_reado_table(efx, buf, table->offset, i);
2087 break;
2088 case 32: /* 128-bit register, interleaved */
2089 efx_reado_table(efx, buf, table->offset, 2 * i);
2090 break;
2091 default:
2092 WARN_ON(1);
2093 return;
2094 }
2095 buf += size;
2096 }
2097 }
2098}
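/* A hedged sketch of the ethtool glue these two functions were
 * written for (wrapper names are illustrative; the real wrappers live
 * in the driver's ethtool code):
 */
static int efx_example_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}

static void efx_example_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}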