Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Raya02d44a2008-10-13 18:47:30 -07002 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
Divy Le Rayf2c68792007-01-30 19:44:13 -080037/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050052
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
Divy Le Rayb8819552007-12-17 18:47:31 -080065 return -EAGAIN;
Divy Le Ray4d22de32007-01-18 22:04:14 -050066 if (delay)
67 udelay(delay);
68 }
69}
70
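/*
 * Illustrative sketch (added commentary, not from the original driver):
 * polling a completion bit with t3_wait_op_done_val().  A_EXAMPLE_STATUS
 * and F_EXAMPLE_DONE are made-up names used only for this example.
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = t3_wait_op_done_val(adapter, A_EXAMPLE_STATUS, F_EXAMPLE_DONE,
 *				  1, 100, 10, &status);
 *
 * This polls until the bit reads as 1, checking up to 100 times with
 * 10 usecs between checks; on success @status holds the register value
 * observed at completion, otherwise ret is -EAGAIN.  t3_wait_op_done(),
 * used throughout this file, is presumably the same poll without the
 * value return (it is defined outside this file).
 */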
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133/**
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
139 *
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
141 * accesses.
142 */
143int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144 u64 *buf)
145{
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
148
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
151
152 if (start >= size64 || start + n > size64)
153 return -EINVAL;
154
155 start *= (8 << mc7->width);
156 while (n--) {
157 int i;
158 u64 val64 = 0;
159
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 int attempts = 10;
162 u32 val;
163
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
170 if (val & F_BUSY)
171 return -EIO;
172
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
176 mc7->offset +
177 A_MC7_BD_DATA0);
178 val64 |= (u64) val << 32;
179 } else {
180 if (mc7->width > 1)
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
183 }
184 start += 8;
185 }
186 *buf++ = val64;
187 }
188 return 0;
189}
190
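/*
 * Note on the loop above (added commentary, not from the original
 * sources): each inner-loop iteration performs one backdoor access and
 * advances the byte address by 8.  For mc7->width == 0 a single access
 * yields a full 64-bit word, with A_MC7_BD_DATA0 supplying the low half
 * and A_MC7_BD_DATA1 the high half.  For wider settings the word is
 * assembled from 1 << width partial reads of A_MC7_BD_DATA1, with
 * shift[] discarding the unused low bits and step[] placing each piece
 * at its position within val64.
 */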
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
Divy Le Ray04497982008-10-08 17:38:29 -0700197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500198
Divy Le Ray4d22de32007-01-18 22:04:14 -0500199 t3_write_reg(adap, A_MI1_CFG, val);
200}
201
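/*
 * Worked example (illustrative values only): the divider above is chosen
 * so the MDIO clock comes out close to the VPD-requested rate, i.e.
 * mdc ~= cclk / (2 * (clkdiv + 1)).  With cclk and mdc in the same
 * units, e.g. cclk = 125000 and mdc = 2500, this gives
 * clkdiv = 125000 / (2 * 2500) - 1 = 24.
 */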
Divy Le Ray04497982008-10-08 17:38:29 -0700202#define MDIO_ATTEMPTS 20
Divy Le Ray4d22de32007-01-18 22:04:14 -0500203
204/*
Divy Le Ray04497982008-10-08 17:38:29 -0700205 * MI1 read/write operations for clause 22 PHYs.
Divy Le Ray4d22de32007-01-18 22:04:14 -0500206 */
Divy Le Ray04497982008-10-08 17:38:29 -0700207static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500209{
210 int ret;
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
212
213 if (mmd_addr)
214 return -EINVAL;
215
216 mutex_lock(&adapter->mdio_lock);
Divy Le Ray04497982008-10-08 17:38:29 -0700217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
Divy Le Ray4d22de32007-01-18 22:04:14 -0500218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
Divy Le Ray04497982008-10-08 17:38:29 -0700220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500221 if (!ret)
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
225}
226
Divy Le Ray04497982008-10-08 17:38:29 -0700227static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500228 int reg_addr, unsigned int val)
229{
230 int ret;
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
232
233 if (mmd_addr)
234 return -EINVAL;
235
236 mutex_lock(&adapter->mdio_lock);
Divy Le Ray04497982008-10-08 17:38:29 -0700237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
Divy Le Ray4d22de32007-01-18 22:04:14 -0500238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
Divy Le Ray04497982008-10-08 17:38:29 -0700241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500242 mutex_unlock(&adapter->mdio_lock);
243 return ret;
244}
245
246static const struct mdio_ops mi1_mdio_ops = {
Divy Le Ray04497982008-10-08 17:38:29 -0700247 t3_mi1_read,
248 t3_mi1_write
Divy Le Ray4d22de32007-01-18 22:04:14 -0500249};
250
251/*
Divy Le Ray04497982008-10-08 17:38:29 -0700252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
254 */
255static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr)
257{
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
259
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
265 MDIO_ATTEMPTS, 10);
266}
267
268/*
Divy Le Ray4d22de32007-01-18 22:04:14 -0500269 * MI1 read/write operations for indirect-addressed PHYs.
270 */
271static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
273{
274 int ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500275
276 mutex_lock(&adapter->mdio_lock);
Divy Le Ray04497982008-10-08 17:38:29 -0700277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500278 if (!ret) {
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
Divy Le Ray04497982008-10-08 17:38:29 -0700281 MDIO_ATTEMPTS, 10);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500282 if (!ret)
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
284 }
285 mutex_unlock(&adapter->mdio_lock);
286 return ret;
287}
288
289static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
291{
292 int ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500293
294 mutex_lock(&adapter->mdio_lock);
Divy Le Ray04497982008-10-08 17:38:29 -0700295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500296 if (!ret) {
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
Divy Le Ray04497982008-10-08 17:38:29 -0700300 MDIO_ATTEMPTS, 10);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500301 }
302 mutex_unlock(&adapter->mdio_lock);
303 return ret;
304}
305
306static const struct mdio_ops mi1_mdio_ext_ops = {
307 mi1_ext_read,
308 mi1_ext_write
309};
310
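/*
 * Summary of the MDIO opcodes as used in this file (added commentary,
 * not from the original sources): the clause 22 path programs ST = 1 and
 * uses V_MDI_OP(2) for a read and V_MDI_OP(1) for a write, with the
 * register number encoded directly in A_MI1_ADDR.  The clause 45 path
 * programs ST = 0 and first issues an address cycle, V_MDI_OP(0) with
 * the target register in A_MI1_DATA, followed by V_MDI_OP(1) for a
 * write or V_MDI_OP(3) for a read.  Every operation completes by
 * polling F_BUSY in A_MI1_OP.
 */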
311/**
312 * t3_mdio_change_bits - modify the value of a PHY register
313 * @phy: the PHY to operate on
314 * @mmd: the device address
315 * @reg: the register address
316 * @clear: what part of the register value to mask off
317 * @set: what part of the register value to set
318 *
319 * Changes the value of a PHY register by applying a mask to its current
320 * value and ORing the result with a new value.
321 */
322int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
323 unsigned int set)
324{
325 int ret;
326 unsigned int val;
327
328 ret = mdio_read(phy, mmd, reg, &val);
329 if (!ret) {
330 val &= ~clear;
331 ret = mdio_write(phy, mmd, reg, val | set);
332 }
333 return ret;
334}
335
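/*
 * Illustrative use (added commentary): the helper above is a plain
 * read-modify-write on a PHY register.  For instance, t3_phy_reset()
 * below calls
 *
 *	t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
 *
 * to start a reset while ensuring the power-down bit is cleared in the
 * same write.
 */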
336/**
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
341 *
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
344 * for 10G PHYs.
345 */
346int t3_phy_reset(struct cphy *phy, int mmd, int wait)
347{
348 int err;
349 unsigned int ctl;
350
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
352 if (err || !wait)
353 return err;
354
355 do {
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 if (err)
358 return err;
359 ctl &= BMCR_RESET;
360 if (ctl)
361 msleep(1);
362 } while (ctl && --wait);
363
364 return ctl ? -1 : 0;
365}
366
367/**
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
371 *
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
374 */
375int t3_phy_advertise(struct cphy *phy, unsigned int advert)
376{
377 int err;
378 unsigned int val = 0;
379
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
381 if (err)
382 return err;
383
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
389
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
391 if (err)
392 return err;
393
394 val = 1;
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
408}
409
410/**
Divy Le Ray0ce2f032008-10-08 17:40:28 -0700411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
414 *
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
417 */
418int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
419{
420 unsigned int val = 0;
421
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
431}
432
433/**
Divy Le Ray4d22de32007-01-18 22:04:14 -0500434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
438 *
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
441 */
442int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
443{
444 int err;
445 unsigned int ctl;
446
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
448 if (err)
449 return err;
450
451 if (speed >= 0) {
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
457 }
458 if (duplex >= 0) {
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
462 }
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
466}
467
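/*
 * Illustrative sketch (not from the original sources): forcing a PHY to
 * 100 Mb/s full duplex would look like
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *
 * which clears BMCR_ANENABLE along with the speed/duplex bits it
 * rewrites.  Passing SPEED_1000 instead keeps auto-negotiation enabled,
 * as the code above requires for GigE; passing -1 for either argument
 * leaves that setting untouched.
 */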
Divy Le Ray9b1e3652008-10-08 17:39:31 -0700468int t3_phy_lasi_intr_enable(struct cphy *phy)
469{
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
471}
472
473int t3_phy_lasi_intr_disable(struct cphy *phy)
474{
475 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
476}
477
478int t3_phy_lasi_intr_clear(struct cphy *phy)
479{
480 u32 val;
481
482 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
483}
484
485int t3_phy_lasi_intr_handler(struct cphy *phy)
486{
487 unsigned int status;
488 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
489
490 if (err)
491 return err;
492 return (status & 1) ? cphy_cause_link_change : 0;
493}
494
Divy Le Ray4d22de32007-01-18 22:04:14 -0500495static const struct adapter_info t3_adap_info[] = {
Divy Le Ray04497982008-10-08 17:38:29 -0700496 {2, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500497 F_GPIO2_OEN | F_GPIO4_OEN |
Divy Le Rayf231e0a2008-10-08 17:39:00 -0700498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500499 &mi1_mdio_ops, "Chelsio PE9000"},
Divy Le Ray04497982008-10-08 17:38:29 -0700500 {2, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500501 F_GPIO2_OEN | F_GPIO4_OEN |
Divy Le Rayf231e0a2008-10-08 17:39:00 -0700502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500503 &mi1_mdio_ops, "Chelsio T302"},
Divy Le Ray04497982008-10-08 17:38:29 -0700504 {1, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
Divy Le Ray75758e82007-12-05 10:15:01 -0800506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
Divy Le Rayf231e0a2008-10-08 17:39:00 -0700507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500508 &mi1_mdio_ext_ops, "Chelsio T310"},
Divy Le Ray04497982008-10-08 17:38:29 -0700509 {2, 0,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
Divy Le Rayf231e0a2008-10-08 17:39:00 -0700512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
513 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500514 &mi1_mdio_ext_ops, "Chelsio T320"},
Divy Le Rayce03aad2009-02-18 17:47:57 -0800515 {},
516 {},
517 {1, 0,
518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
521 &mi1_mdio_ext_ops, "Chelsio T310" },
Divy Le Ray4d22de32007-01-18 22:04:14 -0500522};
523
524/*
525 * Return the adapter_info structure with a given index. Out-of-range indices
526 * return NULL.
527 */
528const struct adapter_info *t3_get_adapter_info(unsigned int id)
529{
530 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
531}
532
Divy Le Ray04497982008-10-08 17:38:29 -0700533struct port_type_info {
534 int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
535 int phy_addr, const struct mdio_ops *ops);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500536};
537
Divy Le Ray04497982008-10-08 17:38:29 -0700538static const struct port_type_info port_types[] = {
539 { NULL },
540 { t3_ael1002_phy_prep },
541 { t3_vsc8211_phy_prep },
542 { NULL},
543 { t3_xaui_direct_phy_prep },
Divy Le Ray1e882022008-10-08 17:40:07 -0700544 { t3_ael2005_phy_prep },
Divy Le Ray04497982008-10-08 17:38:29 -0700545 { t3_qt2045_phy_prep },
546 { t3_ael1006_phy_prep },
547 { NULL },
548};
Divy Le Ray4d22de32007-01-18 22:04:14 -0500549
550#define VPD_ENTRY(name, len) \
551 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
552
553/*
554 * Partial EEPROM Vital Product Data structure. Includes only the ID and
555 * VPD-R sections.
556 */
557struct t3_vpd {
558 u8 id_tag;
559 u8 id_len[2];
560 u8 id_data[16];
561 u8 vpdr_tag;
562 u8 vpdr_len[2];
563 VPD_ENTRY(pn, 16); /* part number */
564 VPD_ENTRY(ec, 16); /* EC level */
Divy Le Ray167cdf52007-08-21 20:49:36 -0700565 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
Divy Le Ray4d22de32007-01-18 22:04:14 -0500566 VPD_ENTRY(na, 12); /* MAC address base */
567 VPD_ENTRY(cclk, 6); /* core clock */
568 VPD_ENTRY(mclk, 6); /* mem clock */
569 VPD_ENTRY(uclk, 6); /* uP clk */
570 VPD_ENTRY(mdc, 6); /* MDIO clk */
571 VPD_ENTRY(mt, 2); /* mem timing */
572 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
573 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
574 VPD_ENTRY(port0, 2); /* PHY0 complex */
575 VPD_ENTRY(port1, 2); /* PHY1 complex */
576 VPD_ENTRY(port2, 2); /* PHY2 complex */
577 VPD_ENTRY(port3, 2); /* PHY3 complex */
578 VPD_ENTRY(rv, 1); /* csum */
579 u32 pad; /* for multiple-of-4 sizing and alignment */
580};
581
Divy Le Ray9f643062008-11-09 00:55:28 -0800582#define EEPROM_MAX_POLL 40
Divy Le Ray4d22de32007-01-18 22:04:14 -0500583#define EEPROM_STAT_ADDR 0x4000
584#define VPD_BASE 0xc00
585
586/**
587 * t3_seeprom_read - read a VPD EEPROM location
588 * @adapter: adapter to read
589 * @addr: EEPROM address
590 * @data: where to store the read data
591 *
592 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
593 * VPD ROM capability. A zero is written to the flag bit when the
594 * address is written to the control register. The hardware device will
595 * set the flag to 1 when 4 bytes have been read into the data register.
596 */
Al Viro05e5c112007-12-22 18:56:23 +0000597int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500598{
599 u16 val;
600 int attempts = EEPROM_MAX_POLL;
Al Viro05e5c112007-12-22 18:56:23 +0000601 u32 v;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500602 unsigned int base = adapter->params.pci.vpd_cap_addr;
603
604 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
605 return -EINVAL;
606
607 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
608 do {
609 udelay(10);
610 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
611 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
612
613 if (!(val & PCI_VPD_ADDR_F)) {
614 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
615 return -EIO;
616 }
Al Viro05e5c112007-12-22 18:56:23 +0000617 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
618 *data = cpu_to_le32(v);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500619 return 0;
620}
621
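/*
 * Illustrative sketch (not from the original sources): reading one
 * 32-bit word of VPD, e.g. the first word of the ID tag at VPD_BASE:
 *
 *	__le32 word;
 *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
 *	u32 val = err ? 0 : le32_to_cpu(word);
 *
 * The address must be 4-byte aligned; get_vpd_params() below reads the
 * whole struct t3_vpd this way, one word at a time.
 */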
622/**
623 * t3_seeprom_write - write a VPD EEPROM location
624 * @adapter: adapter to write
625 * @addr: EEPROM address
626 * @data: value to write
627 *
628 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
629 * VPD ROM capability.
630 */
Al Viro05e5c112007-12-22 18:56:23 +0000631int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500632{
633 u16 val;
634 int attempts = EEPROM_MAX_POLL;
635 unsigned int base = adapter->params.pci.vpd_cap_addr;
636
637 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
638 return -EINVAL;
639
640 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
Al Viro05e5c112007-12-22 18:56:23 +0000641 le32_to_cpu(data));
Divy Le Ray4d22de32007-01-18 22:04:14 -0500642 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
643 addr | PCI_VPD_ADDR_F);
644 do {
645 msleep(1);
646 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
647 } while ((val & PCI_VPD_ADDR_F) && --attempts);
648
649 if (val & PCI_VPD_ADDR_F) {
650 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
651 return -EIO;
652 }
653 return 0;
654}
655
656/**
657 * t3_seeprom_wp - enable/disable EEPROM write protection
658 * @adapter: the adapter
659 * @enable: 1 to enable write protection, 0 to disable it
660 *
661 * Enables or disables write protection on the serial EEPROM.
662 */
663int t3_seeprom_wp(struct adapter *adapter, int enable)
664{
665 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
666}
667
668/*
669 * Convert a character holding a hex digit to a number.
670 */
671static unsigned int hex2int(unsigned char c)
672{
673 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
674}
675
676/**
677 * get_vpd_params - read VPD parameters from VPD EEPROM
678 * @adapter: adapter to read
679 * @p: where to store the parameters
680 *
681 * Reads card parameters stored in VPD EEPROM.
682 */
683static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
684{
685 int i, addr, ret;
686 struct t3_vpd vpd;
687
688 /*
689 * Card information is normally at VPD_BASE but some early cards had
690 * it at 0.
691 */
Al Viro05e5c112007-12-22 18:56:23 +0000692 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500693 if (ret)
694 return ret;
695 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
696
697 for (i = 0; i < sizeof(vpd); i += 4) {
698 ret = t3_seeprom_read(adapter, addr + i,
Al Viro05e5c112007-12-22 18:56:23 +0000699 (__le32 *)((u8 *)&vpd + i));
Divy Le Ray4d22de32007-01-18 22:04:14 -0500700 if (ret)
701 return ret;
702 }
703
704 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
705 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
706 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
707 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
708 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
Divy Le Ray167cdf52007-08-21 20:49:36 -0700709 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500710
711 /* Old eeproms didn't have port information */
712 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
713 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
714 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
715 } else {
716 p->port_type[0] = hex2int(vpd.port0_data[0]);
717 p->port_type[1] = hex2int(vpd.port1_data[0]);
718 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
719 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
720 }
721
722 for (i = 0; i < 6; i++)
723 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
724 hex2int(vpd.na_data[2 * i + 1]);
725 return 0;
726}
727
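/*
 * Worked example (illustrative values only): the "na" VPD entry stores
 * the MAC address base as 12 hex characters, two per byte.  If
 * vpd.na_data were "000743123456", the loop above would produce
 * eth_base[] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 }, since each byte
 * is hex2int(high) * 16 + hex2int(low).
 */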
728/* serial flash and firmware constants */
729enum {
730 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
731 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
732 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
733
734 /* flash command opcodes */
735 SF_PROG_PAGE = 2, /* program page */
736 SF_WR_DISABLE = 4, /* disable writes */
737 SF_RD_STATUS = 5, /* read status register */
738 SF_WR_ENABLE = 6, /* enable writes */
739 SF_RD_DATA_FAST = 0xb, /* read flash */
740 SF_ERASE_SECTOR = 0xd8, /* erase sector */
741
742 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
Steve Wise80513672008-07-26 15:40:56 -0500743 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
Divy Le Ray2e283962007-03-18 13:10:06 -0700744 FW_MIN_SIZE = 8 /* at least version and csum */
Divy Le Ray4d22de32007-01-18 22:04:14 -0500745};
746
747/**
748 * sf1_read - read data from the serial flash
749 * @adapter: the adapter
750 * @byte_cnt: number of bytes to read
751 * @cont: whether another operation will be chained
752 * @valp: where to store the read data
753 *
754 * Reads up to 4 bytes of data from the serial flash. The location of
755 * the read needs to be specified prior to calling this by issuing the
756 * appropriate commands to the serial flash.
757 */
758static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
759 u32 *valp)
760{
761 int ret;
762
763 if (!byte_cnt || byte_cnt > 4)
764 return -EINVAL;
765 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
766 return -EBUSY;
767 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
768 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
769 if (!ret)
770 *valp = t3_read_reg(adapter, A_SF_DATA);
771 return ret;
772}
773
774/**
775 * sf1_write - write data to the serial flash
776 * @adapter: the adapter
777 * @byte_cnt: number of bytes to write
778 * @cont: whether another operation will be chained
779 * @val: value to write
780 *
781 * Writes up to 4 bytes of data to the serial flash. The location of
782 * the write needs to be specified prior to calling this by issuing the
783 * appropriate commands to the serial flash.
784 */
785static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
786 u32 val)
787{
788 if (!byte_cnt || byte_cnt > 4)
789 return -EINVAL;
790 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
791 return -EBUSY;
792 t3_write_reg(adapter, A_SF_DATA, val);
793 t3_write_reg(adapter, A_SF_OP,
794 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
796}
797
798/**
799 * flash_wait_op - wait for a flash operation to complete
800 * @adapter: the adapter
801 * @attempts: max number of polls of the status register
802 * @delay: delay between polls in ms
803 *
804 * Wait for a flash operation to complete by polling the status register.
805 */
806static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
807{
808 int ret;
809 u32 status;
810
811 while (1) {
812 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
813 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
814 return ret;
815 if (!(status & 1))
816 return 0;
817 if (--attempts == 0)
818 return -EAGAIN;
819 if (delay)
820 msleep(delay);
821 }
822}
823
824/**
825 * t3_read_flash - read words from serial flash
826 * @adapter: the adapter
827 * @addr: the start address for the read
828 * @nwords: how many 32-bit words to read
829 * @data: where to store the read data
830 * @byte_oriented: whether to store data as bytes or as words
831 *
832 * Read the specified number of 32-bit words from the serial flash.
833 * If @byte_oriented is set the read data is stored as a byte array
834 * (i.e., big-endian), otherwise as 32-bit words in the platform's
835 * natural endianness.
836 */
837int t3_read_flash(struct adapter *adapter, unsigned int addr,
838 unsigned int nwords, u32 *data, int byte_oriented)
839{
840 int ret;
841
842 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
843 return -EINVAL;
844
845 addr = swab32(addr) | SF_RD_DATA_FAST;
846
847 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
848 (ret = sf1_read(adapter, 1, 1, data)) != 0)
849 return ret;
850
851 for (; nwords; nwords--, data++) {
852 ret = sf1_read(adapter, 4, nwords > 1, data);
853 if (ret)
854 return ret;
855 if (byte_oriented)
856 *data = htonl(*data);
857 }
858 return 0;
859}
860
861/**
862 * t3_write_flash - write up to a page of data to the serial flash
863 * @adapter: the adapter
864 * @addr: the start address to write
865 * @n: length of data to write
866 * @data: the data to write
867 *
868 * Writes up to a page of data (256 bytes) to the serial flash starting
869 * at the given address.
870 */
871static int t3_write_flash(struct adapter *adapter, unsigned int addr,
872 unsigned int n, const u8 *data)
873{
874 int ret;
875 u32 buf[64];
876 unsigned int i, c, left, val, offset = addr & 0xff;
877
878 if (addr + n > SF_SIZE || offset + n > 256)
879 return -EINVAL;
880
881 val = swab32(addr) | SF_PROG_PAGE;
882
883 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
884 (ret = sf1_write(adapter, 4, 1, val)) != 0)
885 return ret;
886
887 for (left = n; left; left -= c) {
888 c = min(left, 4U);
889 for (val = 0, i = 0; i < c; ++i)
890 val = (val << 8) + *data++;
891
892 ret = sf1_write(adapter, c, c != left, val);
893 if (ret)
894 return ret;
895 }
896 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
897 return ret;
898
899 /* Read the page to verify the write succeeded */
900 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
901 if (ret)
902 return ret;
903
904 if (memcmp(data - n, (u8 *) buf + offset, n))
905 return -EIO;
906 return 0;
907}
908
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700909/**
Divy Le Ray47330072007-08-29 19:15:52 -0700910 * t3_get_tp_version - read the tp sram version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700911 * @adapter: the adapter
Divy Le Ray47330072007-08-29 19:15:52 -0700912 * @vers: where to place the version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700913 *
Divy Le Ray47330072007-08-29 19:15:52 -0700914 * Reads the protocol sram version from sram.
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700915 */
Divy Le Ray47330072007-08-29 19:15:52 -0700916int t3_get_tp_version(struct adapter *adapter, u32 *vers)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700917{
918 int ret;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700919
920 /* Get version loaded in SRAM */
921 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
922 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
923 1, 1, 5, 1);
924 if (ret)
925 return ret;
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500926
Divy Le Ray47330072007-08-29 19:15:52 -0700927 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
928
929 return 0;
930}
931
932/**
933 * t3_check_tpsram_version - read the tp sram version
934 * @adapter: the adapter
Divy Le Ray47330072007-08-29 19:15:52 -0700935 *
936 * Reads the protocol sram version from sram.
937 */
Divy Le Ray8207bef2008-12-16 01:51:47 -0800938int t3_check_tpsram_version(struct adapter *adapter)
Divy Le Ray47330072007-08-29 19:15:52 -0700939{
940 int ret;
941 u32 vers;
942 unsigned int major, minor;
943
944 if (adapter->params.rev == T3_REV_A)
945 return 0;
946
Divy Le Ray47330072007-08-29 19:15:52 -0700947
948 ret = t3_get_tp_version(adapter, &vers);
949 if (ret)
950 return ret;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700951
952 major = G_TP_VERSION_MAJOR(vers);
953 minor = G_TP_VERSION_MINOR(vers);
954
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500955 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700956 return 0;
Divy Le Ray47330072007-08-29 19:15:52 -0700957 else {
Divy Le Ray47330072007-08-29 19:15:52 -0700958 CH_ERR(adapter, "found wrong TP version (%u.%u), "
959 "driver compiled for version %d.%d\n", major, minor,
960 TP_VERSION_MAJOR, TP_VERSION_MINOR);
961 }
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700962 return -EINVAL;
963}
964
965/**
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500966 * t3_check_tpsram - check if provided protocol SRAM
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700967 * is compatible with this driver
968 * @adapter: the adapter
969 * @tp_sram: the firmware image to write
970 * @size: image size
971 *
972 * Checks if an adapter's tp sram is compatible with the driver.
973 * Returns 0 if the versions are compatible, a negative error otherwise.
974 */
David Woodhouse2c733a12008-05-24 00:10:55 +0100975int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
976 unsigned int size)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700977{
978 u32 csum;
979 unsigned int i;
Al Viro05e5c112007-12-22 18:56:23 +0000980 const __be32 *p = (const __be32 *)tp_sram;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700981
982 /* Verify checksum */
983 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
984 csum += ntohl(p[i]);
985 if (csum != 0xffffffff) {
986 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
987 csum);
988 return -EINVAL;
989 }
990
991 return 0;
992}
993
Divy Le Ray4aac3892007-01-30 19:43:45 -0800994enum fw_version_type {
995 FW_VERSION_N3,
996 FW_VERSION_T3
997};
998
Divy Le Ray4d22de32007-01-18 22:04:14 -0500999/**
1000 * t3_get_fw_version - read the firmware version
1001 * @adapter: the adapter
1002 * @vers: where to place the version
1003 *
1004 * Reads the FW version from flash.
1005 */
1006int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1007{
1008 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1009}
1010
1011/**
1012 * t3_check_fw_version - check if the FW is compatible with this driver
1013 * @adapter: the adapter
Divy Le Ray8207bef2008-12-16 01:51:47 -08001014 *
Divy Le Ray4d22de32007-01-18 22:04:14 -05001015 * Checks if an adapter's FW is compatible with the driver. Returns 0
1016 * if the versions are compatible, a negative error otherwise.
1017 */
Divy Le Ray8207bef2008-12-16 01:51:47 -08001018int t3_check_fw_version(struct adapter *adapter)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001019{
1020 int ret;
1021 u32 vers;
Divy Le Ray4aac3892007-01-30 19:43:45 -08001022 unsigned int type, major, minor;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001023
1024 ret = t3_get_fw_version(adapter, &vers);
1025 if (ret)
1026 return ret;
1027
Divy Le Ray4aac3892007-01-30 19:43:45 -08001028 type = G_FW_VERSION_TYPE(vers);
1029 major = G_FW_VERSION_MAJOR(vers);
1030 minor = G_FW_VERSION_MINOR(vers);
1031
Divy Le Ray75d86262007-02-25 16:32:37 -08001032 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1033 minor == FW_VERSION_MINOR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001034 return 0;
Divy Le Ray8207bef2008-12-16 01:51:47 -08001035 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
Divy Le Ray273fa902007-11-16 11:22:00 -08001036 CH_WARN(adapter, "found old FW minor version(%u.%u), "
Divy Le Raya5a3b462007-09-05 15:58:09 -07001037 "driver compiled for version %u.%u\n", major, minor,
1038 FW_VERSION_MAJOR, FW_VERSION_MINOR);
Divy Le Ray8207bef2008-12-16 01:51:47 -08001039 else {
Divy Le Ray273fa902007-11-16 11:22:00 -08001040 CH_WARN(adapter, "found newer FW version(%u.%u), "
1041 "driver compiled for version %u.%u\n", major, minor,
1042 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1043 return 0;
Divy Le Raya5a3b462007-09-05 15:58:09 -07001044 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001045 return -EINVAL;
1046}
1047
1048/**
1049 * t3_flash_erase_sectors - erase a range of flash sectors
1050 * @adapter: the adapter
1051 * @start: the first sector to erase
1052 * @end: the last sector to erase
1053 *
1054 * Erases the sectors in the given range.
1055 */
1056static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1057{
1058 while (start <= end) {
1059 int ret;
1060
1061 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1062 (ret = sf1_write(adapter, 4, 0,
1063 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1064 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1065 return ret;
1066 start++;
1067 }
1068 return 0;
1069}
1070
1071/*
1072 * t3_load_fw - download firmware
1073 * @adapter: the adapter
Divy Le Ray8a9fab22007-05-30 21:10:52 -07001074 * @fw_data: the firmware image to write
Divy Le Ray4d22de32007-01-18 22:04:14 -05001075 * @size: image size
1076 *
1077 * Write the supplied firmware image to the card's serial flash.
1078 * The FW image has the following sections: @size - 8 bytes of code and
1079 * data, followed by 4 bytes of FW version, followed by the 32-bit
1080 * 1's complement checksum of the whole image.
1081 */
1082int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1083{
1084 u32 csum;
1085 unsigned int i;
Al Viro05e5c112007-12-22 18:56:23 +00001086 const __be32 *p = (const __be32 *)fw_data;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001087 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1088
Divy Le Ray2e283962007-03-18 13:10:06 -07001089 if ((size & 3) || size < FW_MIN_SIZE)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001090 return -EINVAL;
1091 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1092 return -EFBIG;
1093
1094 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1095 csum += ntohl(p[i]);
1096 if (csum != 0xffffffff) {
1097 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1098 csum);
1099 return -EINVAL;
1100 }
1101
1102 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1103 if (ret)
1104 goto out;
1105
1106 size -= 8; /* trim off version and checksum */
1107 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1108 unsigned int chunk_size = min(size, 256U);
1109
1110 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1111 if (ret)
1112 goto out;
1113
1114 addr += chunk_size;
1115 fw_data += chunk_size;
1116 size -= chunk_size;
1117 }
1118
1119 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1120out:
1121 if (ret)
1122 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1123 return ret;
1124}
1125
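/*
 * Illustrative sketch (not from the original sources): the image layout
 * the loader above expects can be checked on the host with the same
 * arithmetic, i.e. the 32-bit big-endian words of the complete image,
 * including the trailing version and checksum words, must sum to
 * 0xffffffff:
 *
 *	const __be32 *p = (const __be32 *)fw_data;
 *	u32 csum = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < size / 4; i++)
 *		csum += ntohl(p[i]);
 *	ok = (csum == 0xffffffff);
 *
 * The last 8 bytes (version, then checksum) are not written to the code
 * region; the version word is flashed separately at FW_VERS_ADDR.
 */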
1126#define CIM_CTL_BASE 0x2000
1127
1128/**
1129 * t3_cim_ctl_blk_read - read a block from CIM control region
1130 *
1131 * @adap: the adapter
1132 * @addr: the start address within the CIM control region
1133 * @n: number of words to read
1134 * @valp: where to store the result
1135 *
1136 * Reads a block of 4-byte words from the CIM control region.
1137 */
1138int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1139 unsigned int n, unsigned int *valp)
1140{
1141 int ret = 0;
1142
1143 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1144 return -EBUSY;
1145
1146 for ( ; !ret && n--; addr += 4) {
1147 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1148 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1149 0, 5, 2);
1150 if (!ret)
1151 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1152 }
1153 return ret;
1154}
1155
1156
1157/**
1158 * t3_link_changed - handle interface link changes
1159 * @adapter: the adapter
1160 * @port_id: the port index that changed link state
1161 *
1162 * Called when a port's link settings change to propagate the new values
1163 * to the associated PHY and MAC. After performing the common tasks it
1164 * invokes an OS-specific handler.
1165 */
1166void t3_link_changed(struct adapter *adapter, int port_id)
1167{
1168 int link_ok, speed, duplex, fc;
1169 struct port_info *pi = adap2pinfo(adapter, port_id);
1170 struct cphy *phy = &pi->phy;
1171 struct cmac *mac = &pi->mac;
1172 struct link_config *lc = &pi->link_config;
1173
1174 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1175
Divy Le Ray9b1e3652008-10-08 17:39:31 -07001176 if (lc->requested_fc & PAUSE_AUTONEG)
1177 fc &= lc->requested_fc;
1178 else
1179 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1180
1181 if (link_ok == lc->link_ok && speed == lc->speed &&
1182 duplex == lc->duplex && fc == lc->fc)
1183 return; /* nothing changed */
1184
Divy Le Ray4d22de32007-01-18 22:04:14 -05001185 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1186 uses_xaui(adapter)) {
1187 if (link_ok)
1188 t3b_pcs_reset(mac);
1189 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1190 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1191 }
1192 lc->link_ok = link_ok;
1193 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1194 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001195
1196 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1197 /* Set MAC speed, duplex, and flow control to match PHY. */
1198 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1199 lc->fc = fc;
1200 }
1201
1202 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1203}
1204
1205/**
1206 * t3_link_start - apply link configuration to MAC/PHY
1207 * @phy: the PHY to setup
1208 * @mac: the MAC to setup
1209 * @lc: the requested link configuration
1210 *
1211 * Set up a port's MAC and PHY according to a desired link configuration.
1212 * - If the PHY can auto-negotiate first decide what to advertise, then
1213 * enable/disable auto-negotiation as desired, and reset.
1214 * - If the PHY does not auto-negotiate just reset it.
1215 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1216 * otherwise do it later based on the outcome of auto-negotiation.
1217 */
1218int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1219{
1220 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1221
1222 lc->link_ok = 0;
1223 if (lc->supported & SUPPORTED_Autoneg) {
1224 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1225 if (fc) {
1226 lc->advertising |= ADVERTISED_Asym_Pause;
1227 if (fc & PAUSE_RX)
1228 lc->advertising |= ADVERTISED_Pause;
1229 }
1230 phy->ops->advertise(phy, lc->advertising);
1231
1232 if (lc->autoneg == AUTONEG_DISABLE) {
1233 lc->speed = lc->requested_speed;
1234 lc->duplex = lc->requested_duplex;
1235 lc->fc = (unsigned char)fc;
1236 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1237 fc);
1238 /* Also disables autoneg */
1239 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001240 } else
1241 phy->ops->autoneg_enable(phy);
1242 } else {
1243 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1244 lc->fc = (unsigned char)fc;
1245 phy->ops->reset(phy, 0);
1246 }
1247 return 0;
1248}
1249
1250/**
1251 * t3_set_vlan_accel - control HW VLAN extraction
1252 * @adapter: the adapter
1253 * @ports: bitmap of adapter ports to operate on
1254 * @on: enable (1) or disable (0) HW VLAN extraction
1255 *
1256 * Enables or disables HW extraction of VLAN tags for the given ports.
1257 */
1258void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1259{
1260 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1261 ports << S_VLANEXTRACTIONENABLE,
1262 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1263}
1264
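/*
 * Illustrative use (not from the original sources): @ports is a bitmap,
 * so enabling HW VLAN extraction on ports 0 and 1 would be
 *
 *	t3_set_vlan_accel(adapter, 0x3, 1);
 *
 * which sets both per-port bits in the VLANEXTRACTIONENABLE field of
 * A_TP_OUT_CONFIG; passing 0 for @on clears the selected bits instead.
 */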
1265struct intr_info {
1266 unsigned int mask; /* bits to check in interrupt status */
1267 const char *msg; /* message to print or NULL */
1268 short stat_idx; /* stat counter to increment or -1 */
Divy Le Ray20d3fc12008-10-08 17:36:03 -07001269 unsigned short fatal; /* whether the condition reported is fatal */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001270};
1271
1272/**
1273 * t3_handle_intr_status - table driven interrupt handler
1274 * @adapter: the adapter that generated the interrupt
1275 * @reg: the interrupt status register to process
1276 * @mask: a mask to apply to the interrupt status
1277 * @acts: table of interrupt actions
1278 * @stats: statistics counters tracking interrupt occurrences
1279 *
1280 * A table driven interrupt handler that applies a set of masks to an
1281 * interrupt status word and performs the corresponding actions if the
1282 * interrupts described by the mask have occurred. The actions include
1283 * optionally printing a warning or alert message, and optionally
1284 * incrementing a stat counter. The table is terminated by an entry
1285 * specifying mask 0. Returns the number of fatal interrupt conditions.
1286 */
1287static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1288 unsigned int mask,
1289 const struct intr_info *acts,
1290 unsigned long *stats)
1291{
1292 int fatal = 0;
1293 unsigned int status = t3_read_reg(adapter, reg) & mask;
1294
1295 for (; acts->mask; ++acts) {
1296 if (!(status & acts->mask))
1297 continue;
1298 if (acts->fatal) {
1299 fatal++;
1300 CH_ALERT(adapter, "%s (0x%x)\n",
1301 acts->msg, status & acts->mask);
1302 } else if (acts->msg)
1303 CH_WARN(adapter, "%s (0x%x)\n",
1304 acts->msg, status & acts->mask);
1305 if (acts->stat_idx >= 0)
1306 stats[acts->stat_idx]++;
1307 }
1308 if (status) /* clear processed interrupts */
1309 t3_write_reg(adapter, reg, status);
1310 return fatal;
1311}
1312
Divy Le Rayb8819552007-12-17 18:47:31 -08001313#define SGE_INTR_MASK (F_RSPQDISABLED | \
1314 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1315 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1316 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1317 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1318 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1319 F_HIRCQPARITYERROR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001320#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1321 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1322 F_NFASRCHFAIL)
1323#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1324#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1325 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
Divy Le Rayfc8821962009-03-12 21:14:09 +00001326 F_TXFIFO_UNDERRUN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001327#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1328 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1329 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1330 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1331 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1332 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1333#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1334 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1335 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
Divy Le Rayb8819552007-12-17 18:47:31 -08001336 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1337 F_TXPARERR | V_BISTERR(M_BISTERR))
1338#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1339 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1340 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1341#define ULPTX_INTR_MASK 0xfc
1342#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
Divy Le Ray4d22de32007-01-18 22:04:14 -05001343 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1344 F_ZERO_SWITCH_ERROR)
1345#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1346 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1347 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
Divy Le Rayb8819552007-12-17 18:47:31 -08001348 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1349 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1350 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1351 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1352 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001353#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1354 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1355 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1356#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1357 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1358 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1359#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1360 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1361 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1362 V_MCAPARERRENB(M_MCAPARERRENB))
1363#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1364 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1365 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1366 F_MPS0 | F_CPL_SWITCH)
1367
1368/*
1369 * Interrupt handler for the PCIX1 module.
1370 */
1371static void pci_intr_handler(struct adapter *adapter)
1372{
1373 static const struct intr_info pcix1_intr_info[] = {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001374 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1375 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1376 {F_RCVTARABT, "PCI received target abort", -1, 1},
1377 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1378 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1379 {F_DETPARERR, "PCI detected parity error", -1, 1},
1380 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1381 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1382 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1383 1},
1384 {F_DETCORECCERR, "PCI correctable ECC error",
1385 STAT_PCI_CORR_ECC, 0},
1386 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1387 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1388 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1389 1},
1390 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1391 1},
1392 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1393 1},
1394 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1395 "error", -1, 1},
1396 {0}
1397 };
1398
1399 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1400 pcix1_intr_info, adapter->irq_stats))
1401 t3_fatal_err(adapter);
1402}
1403
1404/*
1405 * Interrupt handler for the PCIE module.
1406 */
1407static void pcie_intr_handler(struct adapter *adapter)
1408{
1409 static const struct intr_info pcie_intr_info[] = {
Divy Le Rayb5a44bc2007-01-30 19:44:01 -08001410 {F_PEXERR, "PCI PEX error", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001411 {F_UNXSPLCPLERRR,
1412 "PCI unexpected split completion DMA read error", -1, 1},
1413 {F_UNXSPLCPLERRC,
1414 "PCI unexpected split completion DMA command error", -1, 1},
1415 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1416 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1417 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1418 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1419 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1420 "PCI MSI-X table/PBA parity error", -1, 1},
Divy Le Rayb8819552007-12-17 18:47:31 -08001421 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1422 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1423 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1424 {F_TXPARERR, "PCI Tx parity error", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001425 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1426 {0}
1427 };
1428
Divy Le Ray3eea3332007-09-05 15:58:15 -07001429 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1430 CH_ALERT(adapter, "PEX error code 0x%x\n",
1431 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1432
Divy Le Ray4d22de32007-01-18 22:04:14 -05001433 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1434 pcie_intr_info, adapter->irq_stats))
1435 t3_fatal_err(adapter);
1436}
1437
1438/*
1439 * TP interrupt handler.
1440 */
1441static void tp_intr_handler(struct adapter *adapter)
1442{
1443 static const struct intr_info tp_intr_info[] = {
1444 {0xffffff, "TP parity error", -1, 1},
1445 {0x1000000, "TP out of Rx pages", -1, 1},
1446 {0x2000000, "TP out of Tx pages", -1, 1},
1447 {0}
1448 };
1449
Divy Le Raya2604be2007-11-16 11:22:16 -08001450 static struct intr_info tp_intr_info_t3c[] = {
Divy Le Rayb8819552007-12-17 18:47:31 -08001451 {0x1fffffff, "TP parity error", -1, 1},
1452 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1453 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1454 {0}
Divy Le Raya2604be2007-11-16 11:22:16 -08001455 };
1456
Divy Le Ray4d22de32007-01-18 22:04:14 -05001457 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
Divy Le Raya2604be2007-11-16 11:22:16 -08001458 adapter->params.rev < T3_REV_C ?
Divy Le Rayb8819552007-12-17 18:47:31 -08001459 tp_intr_info : tp_intr_info_t3c, NULL))
Divy Le Ray4d22de32007-01-18 22:04:14 -05001460 t3_fatal_err(adapter);
1461}
1462
1463/*
1464 * CIM interrupt handler.
1465 */
1466static void cim_intr_handler(struct adapter *adapter)
1467{
1468 static const struct intr_info cim_intr_info[] = {
1469 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1470 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1471 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1472 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1473 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1474 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1475 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1476 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1477 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1478 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1479 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1480 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
Divy Le Rayb8819552007-12-17 18:47:31 -08001481 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1482 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1483 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1484 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1485 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1486 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1487 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1488 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1489 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1490 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1491 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1492 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001493 {0}
1494 };
1495
1496 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1497 cim_intr_info, NULL))
1498 t3_fatal_err(adapter);
1499}
1500
1501/*
1502 * ULP RX interrupt handler.
1503 */
1504static void ulprx_intr_handler(struct adapter *adapter)
1505{
1506 static const struct intr_info ulprx_intr_info[] = {
Divy Le Rayb8819552007-12-17 18:47:31 -08001507 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1508 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1509 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1510 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1511 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1512 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1513 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1514 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001515 {0}
1516 };
1517
1518 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1519 ulprx_intr_info, NULL))
1520 t3_fatal_err(adapter);
1521}
1522
1523/*
1524 * ULP TX interrupt handler.
1525 */
1526static void ulptx_intr_handler(struct adapter *adapter)
1527{
1528 static const struct intr_info ulptx_intr_info[] = {
1529 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1530 STAT_ULP_CH0_PBL_OOB, 0},
1531 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1532 STAT_ULP_CH1_PBL_OOB, 0},
Divy Le Rayb8819552007-12-17 18:47:31 -08001533 {0xfc, "ULP TX parity error", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001534 {0}
1535 };
1536
1537 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1538 ulptx_intr_info, adapter->irq_stats))
1539 t3_fatal_err(adapter);
1540}
1541
1542#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1543 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1544 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1545 F_ICSPI1_TX_FRAMING_ERROR)
1546#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1547 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1548 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1549 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1550
1551/*
1552 * PM TX interrupt handler.
1553 */
1554static void pmtx_intr_handler(struct adapter *adapter)
1555{
1556 static const struct intr_info pmtx_intr_info[] = {
1557 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1558 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1559 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1560 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1561 "PMTX ispi parity error", -1, 1},
1562 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1563 "PMTX ospi parity error", -1, 1},
1564 {0}
1565 };
1566
1567 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1568 pmtx_intr_info, NULL))
1569 t3_fatal_err(adapter);
1570}
1571
1572#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1573 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1574 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1575 F_IESPI1_TX_FRAMING_ERROR)
1576#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1577 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1578 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1579 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1580
1581/*
1582 * PM RX interrupt handler.
1583 */
1584static void pmrx_intr_handler(struct adapter *adapter)
1585{
1586 static const struct intr_info pmrx_intr_info[] = {
1587 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1588 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1589 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1590 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1591 "PMRX ispi parity error", -1, 1},
1592 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1593 "PMRX ospi parity error", -1, 1},
1594 {0}
1595 };
1596
1597 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1598 pmrx_intr_info, NULL))
1599 t3_fatal_err(adapter);
1600}
1601
1602/*
1603 * CPL switch interrupt handler.
1604 */
1605static void cplsw_intr_handler(struct adapter *adapter)
1606{
1607 static const struct intr_info cplsw_intr_info[] = {
Divy Le Rayb8819552007-12-17 18:47:31 -08001608 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1609 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001610 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1611 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1612 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1613 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1614 {0}
1615 };
1616
1617 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1618 cplsw_intr_info, NULL))
1619 t3_fatal_err(adapter);
1620}
1621
1622/*
1623 * MPS interrupt handler.
1624 */
1625static void mps_intr_handler(struct adapter *adapter)
1626{
1627 static const struct intr_info mps_intr_info[] = {
1628 {0x1ff, "MPS parity error", -1, 1},
1629 {0}
1630 };
1631
1632 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1633 mps_intr_info, NULL))
1634 t3_fatal_err(adapter);
1635}
1636
1637#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1638
1639/*
1640 * MC7 interrupt handler.
1641 */
1642static void mc7_intr_handler(struct mc7 *mc7)
1643{
1644 struct adapter *adapter = mc7->adapter;
1645 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1646
1647 if (cause & F_CE) {
1648 mc7->stats.corr_err++;
1649 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1650 "data 0x%x 0x%x 0x%x\n", mc7->name,
1651 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1652 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1653 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1654 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1655 }
1656
1657 if (cause & F_UE) {
1658 mc7->stats.uncorr_err++;
1659 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1660 "data 0x%x 0x%x 0x%x\n", mc7->name,
1661 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1662 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1663 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1664 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1665 }
1666
1667 if (G_PE(cause)) {
1668 mc7->stats.parity_err++;
1669 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1670 mc7->name, G_PE(cause));
1671 }
1672
1673 if (cause & F_AE) {
1674 u32 addr = 0;
1675
1676 if (adapter->params.rev > 0)
1677 addr = t3_read_reg(adapter,
1678 mc7->offset + A_MC7_ERR_ADDR);
1679 mc7->stats.addr_err++;
1680 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1681 mc7->name, addr);
1682 }
1683
1684 if (cause & MC7_INTR_FATAL)
1685 t3_fatal_err(adapter);
1686
1687 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1688}
1689
1690#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1691 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1692/*
1693 * XGMAC interrupt handler.
1694 */
1695static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1696{
1697 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
Divy Le Rayfc8821962009-03-12 21:14:09 +00001698 /*
1699 * We mask out interrupt causes for which we're not taking interrupts.
1700 * This allows us to use polling logic to monitor some of the other
1701 * conditions when taking interrupts would impose too much load on the
1702 * system.
1703 */
1704 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1705 ~F_RXFIFO_OVERFLOW;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001706
1707 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1708 mac->stats.tx_fifo_parity_err++;
1709 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1710 }
1711 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1712 mac->stats.rx_fifo_parity_err++;
1713 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1714 }
1715 if (cause & F_TXFIFO_UNDERRUN)
1716 mac->stats.tx_fifo_urun++;
1717 if (cause & F_RXFIFO_OVERFLOW)
1718 mac->stats.rx_fifo_ovfl++;
1719 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1720 mac->stats.serdes_signal_loss++;
1721 if (cause & F_XAUIPCSCTCERR)
1722 mac->stats.xaui_pcs_ctc_err++;
1723 if (cause & F_XAUIPCSALIGNCHANGE)
1724 mac->stats.xaui_pcs_align_change++;
1725
1726 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1727 if (cause & XGM_INTR_FATAL)
1728 t3_fatal_err(adap);
1729 return cause != 0;
1730}
1731
1732/*
1733 * Interrupt handler for PHY events.
1734 */
1735int t3_phy_intr_handler(struct adapter *adapter)
1736{
Divy Le Ray4d22de32007-01-18 22:04:14 -05001737 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1738
1739 for_each_port(adapter, i) {
Divy Le Ray1ca03cb2007-04-17 11:06:36 -07001740 struct port_info *p = adap2pinfo(adapter, i);
1741
Divy Le Ray04497982008-10-08 17:38:29 -07001742 if (!(p->phy.caps & SUPPORTED_IRQ))
Divy Le Ray1ca03cb2007-04-17 11:06:36 -07001743 continue;
1744
Divy Le Rayf231e0a2008-10-08 17:39:00 -07001745 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
Divy Le Ray1ca03cb2007-04-17 11:06:36 -07001746 int phy_cause = p->phy.ops->intr_handler(&p->phy);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001747
1748 if (phy_cause & cphy_cause_link_change)
1749 t3_link_changed(adapter, i);
1750 if (phy_cause & cphy_cause_fifo_error)
Divy Le Ray1ca03cb2007-04-17 11:06:36 -07001751 p->phy.fifo_errors++;
Divy Le Ray1e882022008-10-08 17:40:07 -07001752 if (phy_cause & cphy_cause_module_change)
1753 t3_os_phymod_changed(adapter, i);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001754 }
1755 }
1756
1757 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1758 return 0;
1759}
1760
1761/*
1762 * T3 slow path (non-data) interrupt handler.
1763 */
1764int t3_slow_intr_handler(struct adapter *adapter)
1765{
1766 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1767
1768 cause &= adapter->slow_intr_mask;
1769 if (!cause)
1770 return 0;
1771 if (cause & F_PCIM0) {
1772 if (is_pcie(adapter))
1773 pcie_intr_handler(adapter);
1774 else
1775 pci_intr_handler(adapter);
1776 }
1777 if (cause & F_SGE3)
1778 t3_sge_err_intr_handler(adapter);
1779 if (cause & F_MC7_PMRX)
1780 mc7_intr_handler(&adapter->pmrx);
1781 if (cause & F_MC7_PMTX)
1782 mc7_intr_handler(&adapter->pmtx);
1783 if (cause & F_MC7_CM)
1784 mc7_intr_handler(&adapter->cm);
1785 if (cause & F_CIM)
1786 cim_intr_handler(adapter);
1787 if (cause & F_TP1)
1788 tp_intr_handler(adapter);
1789 if (cause & F_ULP2_RX)
1790 ulprx_intr_handler(adapter);
1791 if (cause & F_ULP2_TX)
1792 ulptx_intr_handler(adapter);
1793 if (cause & F_PM1_RX)
1794 pmrx_intr_handler(adapter);
1795 if (cause & F_PM1_TX)
1796 pmtx_intr_handler(adapter);
1797 if (cause & F_CPL_SWITCH)
1798 cplsw_intr_handler(adapter);
1799 if (cause & F_MPS0)
1800 mps_intr_handler(adapter);
1801 if (cause & F_MC5A)
1802 t3_mc5_intr_handler(&adapter->mc5);
1803 if (cause & F_XGMAC0_0)
1804 mac_intr_handler(adapter, 0);
1805 if (cause & F_XGMAC0_1)
1806 mac_intr_handler(adapter, 1);
1807 if (cause & F_T3DBG)
1808 t3_os_ext_intr_handler(adapter);
1809
1810 /* Clear the interrupts just processed. */
1811 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1812 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1813 return 1;
1814}
1815
Divy Le Rayf231e0a2008-10-08 17:39:00 -07001816static unsigned int calc_gpio_intr(struct adapter *adap)
1817{
1818 unsigned int i, gpi_intr = 0;
1819
1820 for_each_port(adap, i)
1821 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1822 adapter_info(adap)->gpio_intr[i])
1823 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1824 return gpi_intr;
1825}
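
/*
 * Example (hypothetical board layout, not taken from the original code):
 * with two ports whose PHYs both report SUPPORTED_IRQ and a gpio_intr
 * table of {3, 5}, the loop above yields (1 << 3) | (1 << 5) = 0x28,
 * which t3_intr_enable() below writes to A_T3DBG_INT_ENABLE to enable
 * GPIO lines 3 and 5 as PHY interrupt sources.
 */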
1826
Divy Le Ray4d22de32007-01-18 22:04:14 -05001827/**
1828 * t3_intr_enable - enable interrupts
1829 * @adapter: the adapter whose interrupts should be enabled
1830 *
1831 * Enable interrupts by setting the interrupt enable registers of the
1832 * various HW modules and then enabling the top-level interrupt
1833 * concentrator.
1834 */
1835void t3_intr_enable(struct adapter *adapter)
1836{
1837 static const struct addr_val_pair intr_en_avp[] = {
1838 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1839 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1840 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1841 MC7_INTR_MASK},
1842 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1843 MC7_INTR_MASK},
1844 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1845 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
Divy Le Ray4d22de32007-01-18 22:04:14 -05001846 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1847 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1848 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1849 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1850 };
1851
1852 adapter->slow_intr_mask = PL_INTR_MASK;
1853
1854 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
Divy Le Raya2604be2007-11-16 11:22:16 -08001855 t3_write_reg(adapter, A_TP_INT_ENABLE,
1856 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001857
1858 if (adapter->params.rev > 0) {
1859 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1860 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1861 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1862 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1863 F_PBL_BOUND_ERR_CH1);
1864 } else {
1865 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1866 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1867 }
1868
Divy Le Rayf231e0a2008-10-08 17:39:00 -07001869 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1870
Divy Le Ray4d22de32007-01-18 22:04:14 -05001871 if (is_pcie(adapter))
1872 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1873 else
1874 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1875 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1876 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1877}
1878
1879/**
1880 * t3_intr_disable - disable a card's interrupts
1881 * @adapter: the adapter whose interrupts should be disabled
1882 *
1883 * Disable interrupts. We only disable the top-level interrupt
1884 * concentrator and the SGE data interrupts.
1885 */
1886void t3_intr_disable(struct adapter *adapter)
1887{
1888 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1889 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1890 adapter->slow_intr_mask = 0;
1891}
1892
1893/**
1894 * t3_intr_clear - clear all interrupts
1895 * @adapter: the adapter whose interrupts should be cleared
1896 *
1897 * Clears all interrupts.
1898 */
1899void t3_intr_clear(struct adapter *adapter)
1900{
1901 static const unsigned int cause_reg_addr[] = {
1902 A_SG_INT_CAUSE,
1903 A_SG_RSPQ_FL_STATUS,
1904 A_PCIX_INT_CAUSE,
1905 A_MC7_INT_CAUSE,
1906 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1907 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1908 A_CIM_HOST_INT_CAUSE,
1909 A_TP_INT_CAUSE,
1910 A_MC5_DB_INT_CAUSE,
1911 A_ULPRX_INT_CAUSE,
1912 A_ULPTX_INT_CAUSE,
1913 A_CPL_INTR_CAUSE,
1914 A_PM1_TX_INT_CAUSE,
1915 A_PM1_RX_INT_CAUSE,
1916 A_MPS_INT_CAUSE,
1917 A_T3DBG_INT_CAUSE,
1918 };
1919 unsigned int i;
1920
1921 /* Clear PHY and MAC interrupts for each port. */
1922 for_each_port(adapter, i)
1923 t3_port_intr_clear(adapter, i);
1924
1925 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1926 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1927
Divy Le Ray3eea3332007-09-05 15:58:15 -07001928 if (is_pcie(adapter))
1929 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001930 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1931 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1932}
1933
1934/**
1935 * t3_port_intr_enable - enable port-specific interrupts
1936 * @adapter: associated adapter
1937 * @idx: index of port whose interrupts should be enabled
1938 *
1939 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1940 * adapter port.
1941 */
1942void t3_port_intr_enable(struct adapter *adapter, int idx)
1943{
1944 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1945
1946 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1947 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1948 phy->ops->intr_enable(phy);
1949}
1950
1951/**
1952 * t3_port_intr_disable - disable port-specific interrupts
1953 * @adapter: associated adapter
1954 * @idx: index of port whose interrupts should be disabled
1955 *
1956 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1957 * adapter port.
1958 */
1959void t3_port_intr_disable(struct adapter *adapter, int idx)
1960{
1961 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1962
1963 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1964 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1965 phy->ops->intr_disable(phy);
1966}
1967
1968/**
1969 * t3_port_intr_clear - clear port-specific interrupts
1970 * @adapter: associated adapter
1971 * @idx: index of port whose interrupts to clear
1972 *
1973 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1974 * adapter port.
1975 */
1976void t3_port_intr_clear(struct adapter *adapter, int idx)
1977{
1978 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1979
1980 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1981 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1982 phy->ops->intr_clear(phy);
1983}
1984
Divy Le Raybb9366a2007-09-05 15:58:30 -07001985#define SG_CONTEXT_CMD_ATTEMPTS 100
1986
Divy Le Ray4d22de32007-01-18 22:04:14 -05001987/**
1988 * t3_sge_write_context - write an SGE context
1989 * @adapter: the adapter
1990 * @id: the context id
1991 * @type: the context type
1992 *
1993 * Program an SGE context with the values already loaded in the
1994 * CONTEXT_DATA? registers.
1995 */
1996static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1997 unsigned int type)
1998{
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2003 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2004 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2005 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002006 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002007}
2008
Divy Le Rayb8819552007-12-17 18:47:31 -08002009static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2010 unsigned int type)
2011{
2012 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2013 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2014 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2015 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2016 return t3_sge_write_context(adap, id, type);
2017}
2018
Divy Le Ray4d22de32007-01-18 22:04:14 -05002019/**
2020 * t3_sge_init_ecntxt - initialize an SGE egress context
2021 * @adapter: the adapter to configure
2022 * @id: the context id
2023 * @gts_enable: whether to enable GTS for the context
2024 * @type: the egress context type
2025 * @respq: associated response queue
2026 * @base_addr: base address of queue
2027 * @size: number of queue entries
2028 * @token: uP token
2029 * @gen: initial generation value for the context
2030 * @cidx: consumer pointer
2031 *
2032 * Initialize an SGE egress context and make it ready for use. If the
2033 * platform allows concurrent context operations, the caller is
2034 * responsible for appropriate locking.
2035 */
2036int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2037 enum sge_context_type type, int respq, u64 base_addr,
2038 unsigned int size, unsigned int token, int gen,
2039 unsigned int cidx)
2040{
2041 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2042
2043 if (base_addr & 0xfff) /* must be 4K aligned */
2044 return -EINVAL;
2045 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2046 return -EBUSY;
2047
2048 base_addr >>= 12;
2049 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2050 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2051 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2052 V_EC_BASE_LO(base_addr & 0xffff));
2053 base_addr >>= 16;
2054 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2055 base_addr >>= 32;
2056 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2057 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2058 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2059 F_EC_VALID);
2060 return t3_sge_write_context(adapter, id, F_EGRESS);
2061}
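
/*
 * Usage sketch (hypothetical caller and values, not taken from the
 * original code): the queue base address must be 4KB aligned and only
 * one context operation may be in flight at a time, so a caller
 * typically serializes on its own lock and checks for -EINVAL, -EBUSY
 * or a wait failure:
 *
 *	err = t3_sge_init_ecntxt(adapter, id, 1, SGE_CNTXT_OFLD, rspq_id,
 *				 q_phys_addr, q_size, token, 1, 0);
 *	if (err)
 *		goto err_out;
 */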
2062
2063/**
2064 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2065 * @adapter: the adapter to configure
2066 * @id: the context id
2067 * @gts_enable: whether to enable GTS for the context
2068 * @base_addr: base address of queue
2069 * @size: number of queue entries
2070 * @bsize: size of each buffer for this queue
2071 * @cong_thres: threshold to signal congestion to upstream producers
2072 * @gen: initial generation value for the context
2073 * @cidx: consumer pointer
2074 *
2075 * Initialize an SGE free list context and make it ready for use. The
2076 * caller is responsible for ensuring only one context operation occurs
2077 * at a time.
2078 */
2079int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2080 int gts_enable, u64 base_addr, unsigned int size,
2081 unsigned int bsize, unsigned int cong_thres, int gen,
2082 unsigned int cidx)
2083{
2084 if (base_addr & 0xfff) /* must be 4K aligned */
2085 return -EINVAL;
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 base_addr >>= 12;
2090 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2091 base_addr >>= 32;
2092 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2093 V_FL_BASE_HI((u32) base_addr) |
2094 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2095 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2096 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2097 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2098 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2099 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2100 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2101 return t3_sge_write_context(adapter, id, F_FREELIST);
2102}
2103
2104/**
2105 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2106 * @adapter: the adapter to configure
2107 * @id: the context id
2108 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2109 * @base_addr: base address of queue
2110 * @size: number of queue entries
2111 * @fl_thres: threshold for selecting the normal or jumbo free list
2112 * @gen: initial generation value for the context
2113 * @cidx: consumer pointer
2114 *
2115 * Initialize an SGE response queue context and make it ready for use.
2116 * The caller is responsible for ensuring only one context operation
2117 * occurs at a time.
2118 */
2119int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2120 int irq_vec_idx, u64 base_addr, unsigned int size,
2121 unsigned int fl_thres, int gen, unsigned int cidx)
2122{
2123 unsigned int intr = 0;
2124
2125 if (base_addr & 0xfff) /* must be 4K aligned */
2126 return -EINVAL;
2127 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2128 return -EBUSY;
2129
2130 base_addr >>= 12;
2131 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2132 V_CQ_INDEX(cidx));
2133 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2134 base_addr >>= 32;
2135 if (irq_vec_idx >= 0)
2136 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2137 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2138 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2139 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2140 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2141}
2142
2143/**
2144 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2145 * @adapter: the adapter to configure
2146 * @id: the context id
2147 * @base_addr: base address of queue
2148 * @size: number of queue entries
2149 * @rspq: response queue for async notifications
2150 * @ovfl_mode: CQ overflow mode
2151 * @credits: completion queue credits
2152 * @credit_thres: the credit threshold
2153 *
2154 * Initialize an SGE completion queue context and make it ready for use.
2155 * The caller is responsible for ensuring only one context operation
2156 * occurs at a time.
2157 */
2158int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2159 unsigned int size, int rspq, int ovfl_mode,
2160 unsigned int credits, unsigned int credit_thres)
2161{
2162 if (base_addr & 0xfff) /* must be 4K aligned */
2163 return -EINVAL;
2164 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2165 return -EBUSY;
2166
2167 base_addr >>= 12;
2168 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2169 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2170 base_addr >>= 32;
2171 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2172 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
Divy Le Ray1c17ae82007-09-05 15:58:25 -07002173 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2174 V_CQ_ERR(ovfl_mode));
Divy Le Ray4d22de32007-01-18 22:04:14 -05002175 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2176 V_CQ_CREDIT_THRES(credit_thres));
2177 return t3_sge_write_context(adapter, id, F_CQ);
2178}
2179
2180/**
2181 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2182 * @adapter: the adapter
2183 * @id: the egress context id
2184 * @enable: enable (1) or disable (0) the context
2185 *
2186 * Enable or disable an SGE egress context. The caller is responsible for
2187 * ensuring only one context operation occurs at a time.
2188 */
2189int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2190{
2191 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2192 return -EBUSY;
2193
2194 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2195 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2196 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2197 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2198 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2199 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2200 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2201 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002202 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002203}
2204
2205/**
2206 * t3_sge_disable_fl - disable an SGE free-buffer list
2207 * @adapter: the adapter
2208 * @id: the free list context id
2209 *
2210 * Disable an SGE free-buffer list. The caller is responsible for
2211 * ensuring only one context operation occurs at a time.
2212 */
2213int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2214{
2215 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2216 return -EBUSY;
2217
2218 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2219 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2220 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2221 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2222 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2223 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2224 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2225 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002226 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002227}
2228
2229/**
2230 * t3_sge_disable_rspcntxt - disable an SGE response queue
2231 * @adapter: the adapter
2232 * @id: the response queue context id
2233 *
2234 * Disable an SGE response queue. The caller is responsible for
2235 * ensuring only one context operation occurs at a time.
2236 */
2237int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2238{
2239 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2240 return -EBUSY;
2241
2242 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2243 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2244 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2245 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2246 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2247 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2248 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2249 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002250 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002251}
2252
2253/**
2254 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2255 * @adapter: the adapter
2256 * @id: the completion queue context id
2257 *
2258 * Disable an SGE completion queue. The caller is responsible for
2259 * ensuring only one context operation occurs at a time.
2260 */
2261int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2262{
2263 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2264 return -EBUSY;
2265
2266 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2267 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2268 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2269 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2270 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2271 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2272 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2273 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002274 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002275}
2276
2277/**
2278 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2279 * @adapter: the adapter
2280 * @id: the context id
 2281 * @op: the operation to perform
 * @credits: the number of CQ credits to supply with the operation, where applicable
2282 *
2283 * Perform the selected operation on an SGE completion queue context.
2284 * The caller is responsible for ensuring only one context operation
2285 * occurs at a time.
2286 */
2287int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2288 unsigned int credits)
2289{
2290 u32 val;
2291
2292 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2293 return -EBUSY;
2294
2295 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2296 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2297 V_CONTEXT(id) | F_CQ);
2298 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002299 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
Divy Le Ray4d22de32007-01-18 22:04:14 -05002300 return -EIO;
2301
2302 if (op >= 2 && op < 7) {
2303 if (adapter->params.rev > 0)
2304 return G_CQ_INDEX(val);
2305
2306 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2307 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2308 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002309 F_CONTEXT_CMD_BUSY, 0,
2310 SG_CONTEXT_CMD_ATTEMPTS, 1))
Divy Le Ray4d22de32007-01-18 22:04:14 -05002311 return -EIO;
2312 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2313 }
2314 return 0;
2315}
2316
2317/**
2318 * t3_sge_read_context - read an SGE context
2319 * @type: the context type
2320 * @adapter: the adapter
2321 * @id: the context id
2322 * @data: holds the retrieved context
2323 *
 2324 * Read an SGE context of the given type. The caller is responsible for ensuring
2325 * only one context operation occurs at a time.
2326 */
2327static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2328 unsigned int id, u32 data[4])
2329{
2330 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2331 return -EBUSY;
2332
2333 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2334 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2335 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
Divy Le Raybb9366a2007-09-05 15:58:30 -07002336 SG_CONTEXT_CMD_ATTEMPTS, 1))
Divy Le Ray4d22de32007-01-18 22:04:14 -05002337 return -EIO;
2338 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2339 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2340 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2341 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2342 return 0;
2343}
2344
2345/**
2346 * t3_sge_read_ecntxt - read an SGE egress context
2347 * @adapter: the adapter
2348 * @id: the context id
2349 * @data: holds the retrieved context
2350 *
2351 * Read an SGE egress context. The caller is responsible for ensuring
2352 * only one context operation occurs at a time.
2353 */
2354int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2355{
2356 if (id >= 65536)
2357 return -EINVAL;
2358 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2359}
2360
2361/**
2362 * t3_sge_read_cq - read an SGE CQ context
2363 * @adapter: the adapter
2364 * @id: the context id
2365 * @data: holds the retrieved context
2366 *
2367 * Read an SGE CQ context. The caller is responsible for ensuring
2368 * only one context operation occurs at a time.
2369 */
2370int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2371{
2372 if (id >= 65536)
2373 return -EINVAL;
2374 return t3_sge_read_context(F_CQ, adapter, id, data);
2375}
2376
2377/**
2378 * t3_sge_read_fl - read an SGE free-list context
2379 * @adapter: the adapter
2380 * @id: the context id
2381 * @data: holds the retrieved context
2382 *
2383 * Read an SGE free-list context. The caller is responsible for ensuring
2384 * only one context operation occurs at a time.
2385 */
2386int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2387{
2388 if (id >= SGE_QSETS * 2)
2389 return -EINVAL;
2390 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2391}
2392
2393/**
2394 * t3_sge_read_rspq - read an SGE response queue context
2395 * @adapter: the adapter
2396 * @id: the context id
2397 * @data: holds the retrieved context
2398 *
2399 * Read an SGE response queue context. The caller is responsible for
2400 * ensuring only one context operation occurs at a time.
2401 */
2402int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2403{
2404 if (id >= SGE_QSETS)
2405 return -EINVAL;
2406 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2407}
2408
2409/**
2410 * t3_config_rss - configure Rx packet steering
2411 * @adapter: the adapter
2412 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2413 * @cpus: values for the CPU lookup table (0xff terminated)
2414 * @rspq: values for the response queue lookup table (0xffff terminated)
2415 *
2416 * Programs the receive packet steering logic. @cpus and @rspq provide
2417 * the values for the CPU and response queue lookup tables. If they
 2418 * provide fewer values than the size of the tables, the supplied values
2419 * are used repeatedly until the tables are fully populated.
2420 */
2421void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2422 const u8 * cpus, const u16 *rspq)
2423{
2424 int i, j, cpu_idx = 0, q_idx = 0;
2425
2426 if (cpus)
2427 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2428 u32 val = i << 16;
2429
2430 for (j = 0; j < 2; ++j) {
2431 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2432 if (cpus[cpu_idx] == 0xff)
2433 cpu_idx = 0;
2434 }
2435 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2436 }
2437
2438 if (rspq)
2439 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2440 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2441 (i << 16) | rspq[q_idx++]);
2442 if (rspq[q_idx] == 0xffff)
2443 q_idx = 0;
2444 }
2445
2446 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2447}
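
/*
 * Usage sketch (hypothetical tables and config word, not from the
 * original code): both lookup tables wrap at their terminator, so short
 * arrays are replicated across all RSS_TABLE_SIZE entries.  Spreading
 * traffic over four response queues might look like:
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config_word, cpus, rspq);
 *
 * where rss_config_word is whatever combination of TP_RSS_CONFIG enable
 * bits the caller wants programmed.
 */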
2448
2449/**
2450 * t3_read_rss - read the contents of the RSS tables
2451 * @adapter: the adapter
2452 * @lkup: holds the contents of the RSS lookup table
2453 * @map: holds the contents of the RSS map table
2454 *
2455 * Reads the contents of the receive packet steering tables.
2456 */
2457int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2458{
2459 int i;
2460 u32 val;
2461
2462 if (lkup)
2463 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2464 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2465 0xffff0000 | i);
2466 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2467 if (!(val & 0x80000000))
2468 return -EAGAIN;
2469 *lkup++ = val;
2470 *lkup++ = (val >> 8);
2471 }
2472
2473 if (map)
2474 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2475 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2476 0xffff0000 | i);
2477 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2478 if (!(val & 0x80000000))
2479 return -EAGAIN;
2480 *map++ = val;
2481 }
2482 return 0;
2483}
2484
2485/**
2486 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2487 * @adap: the adapter
2488 * @enable: 1 to select offload mode, 0 for regular NIC
2489 *
2490 * Switches TP to NIC/offload mode.
2491 */
2492void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2493{
2494 if (is_offload(adap) || !enable)
2495 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2496 V_NICMODE(!enable));
2497}
2498
2499/**
2500 * pm_num_pages - calculate the number of pages of the payload memory
2501 * @mem_size: the size of the payload memory
2502 * @pg_size: the size of each payload memory page
2503 *
2504 * Calculate the number of pages, each of the given size, that fit in a
2505 * memory of the specified size, respecting the HW requirement that the
2506 * number of pages must be a multiple of 24.
2507 */
2508static inline unsigned int pm_num_pages(unsigned int mem_size,
2509 unsigned int pg_size)
2510{
2511 unsigned int n = mem_size / pg_size;
2512
2513 return n - n % 24;
2514}
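
/*
 * Worked example (illustrative numbers): with a 64MB payload memory and
 * 16KB pages, n = 67108864 / 16384 = 4096; since 4096 % 24 == 16 the
 * function returns 4080, the largest multiple of 24 that fits.
 */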
2515
2516#define mem_region(adap, start, size, reg) \
2517 t3_write_reg((adap), A_ ## reg, (start)); \
2518 start += size
2519
Divy Le Rayb8819552007-12-17 18:47:31 -08002520/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05002521 * partition_mem - partition memory and configure TP memory settings
2522 * @adap: the adapter
2523 * @p: the TP parameters
2524 *
2525 * Partitions context and payload memory and configures TP's memory
2526 * registers.
2527 */
2528static void partition_mem(struct adapter *adap, const struct tp_params *p)
2529{
2530 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2531 unsigned int timers = 0, timers_shift = 22;
2532
2533 if (adap->params.rev > 0) {
2534 if (tids <= 16 * 1024) {
2535 timers = 1;
2536 timers_shift = 16;
2537 } else if (tids <= 64 * 1024) {
2538 timers = 2;
2539 timers_shift = 18;
2540 } else if (tids <= 256 * 1024) {
2541 timers = 3;
2542 timers_shift = 20;
2543 }
2544 }
2545
2546 t3_write_reg(adap, A_TP_PMM_SIZE,
2547 p->chan_rx_size | (p->chan_tx_size >> 16));
2548
2549 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2550 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2551 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2552 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2553 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2554
2555 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2556 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2557 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2558
2559 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2560 /* Add a bit of headroom and make multiple of 24 */
2561 pstructs += 48;
2562 pstructs -= pstructs % 24;
2563 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2564
2565 m = tids * TCB_SIZE;
2566 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2567 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2568 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2569 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2570 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2571 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2572 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2573 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2574
2575 m = (m + 4095) & ~0xfff;
2576 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2577 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2578
2579 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2580 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2581 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2582 if (tids < m)
2583 adap->params.mc5.nservers += m - tids;
2584}
2585
2586static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2587 u32 val)
2588{
2589 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2590 t3_write_reg(adap, A_TP_PIO_DATA, val);
2591}
2592
2593static void tp_config(struct adapter *adap, const struct tp_params *p)
2594{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002595 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2596 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2597 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2598 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2599 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002600 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
Divy Le Ray4d22de32007-01-18 22:04:14 -05002601 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2602 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2603 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2604 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
Divy Le Rayb8819552007-12-17 18:47:31 -08002605 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
Divy Le Ray4d22de32007-01-18 22:04:14 -05002606 F_IPV6ENABLE | F_NICMODE);
2607 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2608 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002609 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2610 adap->params.rev > 0 ? F_ENABLEESND :
2611 F_T3A_ENABLEESND);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002612
Divy Le Ray3b1d3072007-01-30 19:44:07 -08002613 t3_set_reg_field(adap, A_TP_PC_CONFIG,
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002614 F_ENABLEEPCMDAFULL,
2615 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2616 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
Divy Le Rayb8819552007-12-17 18:47:31 -08002617 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2618 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2619 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002620 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2621 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002622
Divy Le Ray4d22de32007-01-18 22:04:14 -05002623 if (adap->params.rev > 0) {
2624 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2625 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2626 F_TXPACEAUTO);
2627 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2628 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2629 } else
2630 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2631
Divy Le Raya2604be2007-11-16 11:22:16 -08002632 if (adap->params.rev == T3_REV_C)
2633 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2634 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2635 V_TABLELATENCYDELTA(4));
2636
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002637 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2638 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2639 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2640 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002641}
2642
2643/* Desired TP timer resolution in usec */
2644#define TP_TMR_RES 50
2645
2646/* TCP timer values in ms */
2647#define TP_DACK_TIMER 50
2648#define TP_RTO_MIN 250
2649
2650/**
2651 * tp_set_timers - set TP timing parameters
2652 * @adap: the adapter to set
2653 * @core_clk: the core clock frequency in Hz
2654 *
2655 * Set TP's timing parameters, such as the various timer resolutions and
2656 * the TCP timer values.
2657 */
2658static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2659{
2660 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2661 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2662 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2663 unsigned int tps = core_clk >> tre;
2664
2665 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2666 V_DELAYEDACKRESOLUTION(dack_re) |
2667 V_TIMESTAMPRESOLUTION(tstamp_re));
2668 t3_write_reg(adap, A_TP_DACK_TIMER,
2669 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2670 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2671 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2672 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2673 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2674 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2675 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2676 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2677 V_KEEPALIVEMAX(9));
2678
2679#define SECONDS * tps
2680
2681 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2682 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2683 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2684 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2685 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2686 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2687 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2688 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2689 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2690
2691#undef SECONDS
2692}
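
/*
 * Worked example (hypothetical 200 MHz core clock, core_clk = 200000000):
 * the 50 us target corresponds to 10000 core clocks, so
 * tre = fls(10000) - 1 = 13 and one TP timer tick is 2^13 = 8192 clocks,
 * about 41 us (the largest power-of-two tick not exceeding TP_TMR_RES).
 * The SECONDS macro then uses tps = 200000000 >> 13 = 24414 ticks/s, so
 * e.g. A_TP_RXT_MAX is programmed with 64 * 24414 ticks.
 */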
2693
2694/**
2695 * t3_tp_set_coalescing_size - set receive coalescing size
2696 * @adap: the adapter
2697 * @size: the receive coalescing size
2698 * @psh: whether a set PSH bit should deliver coalesced data
2699 *
2700 * Set the receive coalescing size and PSH bit handling.
2701 */
2702int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2703{
2704 u32 val;
2705
2706 if (size > MAX_RX_COALESCING_LEN)
2707 return -EINVAL;
2708
2709 val = t3_read_reg(adap, A_TP_PARA_REG3);
2710 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2711
2712 if (size) {
2713 val |= F_RXCOALESCEENABLE;
2714 if (psh)
2715 val |= F_RXCOALESCEPSHEN;
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002716 size = min(MAX_RX_COALESCING_LEN, size);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002717 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2718 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2719 }
2720 t3_write_reg(adap, A_TP_PARA_REG3, val);
2721 return 0;
2722}
2723
2724/**
2725 * t3_tp_set_max_rxsize - set the max receive size
2726 * @adap: the adapter
2727 * @size: the max receive size
2728 *
2729 * Set TP's max receive size. This is the limit that applies when
2730 * receive coalescing is disabled.
2731 */
2732void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2733{
2734 t3_write_reg(adap, A_TP_PARA_REG7,
2735 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2736}
2737
Roland Dreier7b9b0942008-01-29 14:45:11 -08002738static void init_mtus(unsigned short mtus[])
Divy Le Ray4d22de32007-01-18 22:04:14 -05002739{
2740 /*
2741 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
 2742	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2743 * are enabled and still have at least 8 bytes of payload.
2744 */
Divy Le Ray75758e82007-12-05 10:15:01 -08002745 mtus[0] = 88;
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002746 mtus[1] = 88;
2747 mtus[2] = 256;
2748 mtus[3] = 512;
2749 mtus[4] = 576;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002750 mtus[5] = 1024;
2751 mtus[6] = 1280;
2752 mtus[7] = 1492;
2753 mtus[8] = 1500;
2754 mtus[9] = 2002;
2755 mtus[10] = 2048;
2756 mtus[11] = 4096;
2757 mtus[12] = 4352;
2758 mtus[13] = 8192;
2759 mtus[14] = 9000;
2760 mtus[15] = 9600;
2761}
2762
2763/*
2764 * Initial congestion control parameters.
2765 */
Roland Dreier7b9b0942008-01-29 14:45:11 -08002766static void init_cong_ctrl(unsigned short *a, unsigned short *b)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002767{
2768 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2769 a[9] = 2;
2770 a[10] = 3;
2771 a[11] = 4;
2772 a[12] = 5;
2773 a[13] = 6;
2774 a[14] = 7;
2775 a[15] = 8;
2776 a[16] = 9;
2777 a[17] = 10;
2778 a[18] = 14;
2779 a[19] = 17;
2780 a[20] = 21;
2781 a[21] = 25;
2782 a[22] = 30;
2783 a[23] = 35;
2784 a[24] = 45;
2785 a[25] = 60;
2786 a[26] = 80;
2787 a[27] = 100;
2788 a[28] = 200;
2789 a[29] = 300;
2790 a[30] = 400;
2791 a[31] = 500;
2792
2793 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2794 b[9] = b[10] = 1;
2795 b[11] = b[12] = 2;
2796 b[13] = b[14] = b[15] = b[16] = 3;
2797 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2798 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2799 b[28] = b[29] = 6;
2800 b[30] = b[31] = 7;
2801}
2802
2803/* The minimum additive increment value for the congestion control table */
2804#define CC_MIN_INCR 2U
2805
2806/**
2807 * t3_load_mtus - write the MTU and congestion control HW tables
2808 * @adap: the adapter
2809 * @mtus: the unrestricted values for the MTU table
 2810 * @alpha: the values for the congestion control alpha parameter
2811 * @beta: the values for the congestion control beta parameter
2812 * @mtu_cap: the maximum permitted effective MTU
2813 *
 2814 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2815 * Update the high-speed congestion control table with the supplied alpha,
2816 * beta, and MTUs.
2817 */
2818void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2819 unsigned short alpha[NCCTRL_WIN],
2820 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2821{
2822 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2823 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2824 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2825 28672, 40960, 57344, 81920, 114688, 163840, 229376
2826 };
2827
2828 unsigned int i, w;
2829
2830 for (i = 0; i < NMTUS; ++i) {
2831 unsigned int mtu = min(mtus[i], mtu_cap);
2832 unsigned int log2 = fls(mtu);
2833
2834 if (!(mtu & ((1 << log2) >> 2))) /* round */
2835 log2--;
2836 t3_write_reg(adap, A_TP_MTU_TABLE,
2837 (i << 24) | (log2 << 16) | mtu);
2838
2839 for (w = 0; w < NCCTRL_WIN; ++w) {
2840 unsigned int inc;
2841
2842 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2843 CC_MIN_INCR);
2844
2845 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2846 (w << 16) | (beta[w] << 13) | inc);
2847 }
2848 }
2849}
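
/*
 * Worked example (hypothetical table values): for mtu = 1500, congestion
 * window index w = 7 (avg_pkts[7] = 56) and alpha[7] = 2, the additive
 * increment is max(((1500 - 40) * 2) / 56, CC_MIN_INCR) = max(52, 2) = 52,
 * which is the value written into A_TP_CCTRL_TABLE for that (MTU, window)
 * pair.
 */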
2850
2851/**
2852 * t3_read_hw_mtus - returns the values in the HW MTU table
2853 * @adap: the adapter
2854 * @mtus: where to store the HW MTU values
2855 *
2856 * Reads the HW MTU table.
2857 */
2858void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2859{
2860 int i;
2861
2862 for (i = 0; i < NMTUS; ++i) {
2863 unsigned int val;
2864
2865 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2866 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2867 mtus[i] = val & 0x3fff;
2868 }
2869}
2870
2871/**
2872 * t3_get_cong_cntl_tab - reads the congestion control table
2873 * @adap: the adapter
 2874 * @incr: where to store the additive increments
2875 *
2876 * Reads the additive increments programmed into the HW congestion
2877 * control table.
2878 */
2879void t3_get_cong_cntl_tab(struct adapter *adap,
2880 unsigned short incr[NMTUS][NCCTRL_WIN])
2881{
2882 unsigned int mtu, w;
2883
2884 for (mtu = 0; mtu < NMTUS; ++mtu)
2885 for (w = 0; w < NCCTRL_WIN; ++w) {
2886 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2887 0xffff0000 | (mtu << 5) | w);
2888 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2889 0x1fff;
2890 }
2891}
2892
2893/**
2894 * t3_tp_get_mib_stats - read TP's MIB counters
2895 * @adap: the adapter
2896 * @tps: holds the returned counter values
2897 *
2898 * Returns the values of TP's MIB counters.
2899 */
2900void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2901{
2902 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2903 sizeof(*tps) / sizeof(u32), 0);
2904}
2905
2906#define ulp_region(adap, name, start, len) \
2907 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2908 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2909 (start) + (len) - 1); \
2910 start += len
2911
2912#define ulptx_region(adap, name, start, len) \
2913 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2914 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2915 (start) + (len) - 1)
2916
2917static void ulp_config(struct adapter *adap, const struct tp_params *p)
2918{
2919 unsigned int m = p->chan_rx_size;
2920
2921 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2922 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2923 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2924 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2925 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2926 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2927 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2928 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2929}
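
/*
 * Note (derived from the macros above): ulp_region() advances the running
 * offset m after each region while ulptx_region() does not, so the ULP TX
 * TPT window shares its address range with the ULP RX STAG region that
 * follows it, and likewise the ULP TX PBL window overlays the ULP RX PBL
 * region.  All regions are laid out consecutively starting at offset
 * p->chan_rx_size.
 */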
2930
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002931/**
 2932 * t3_set_proto_sram - set the contents of the protocol SRAM
 2933 * @adap: the adapter
2934 * @data: the protocol image
2935 *
2936 * Write the contents of the protocol SRAM.
2937 */
David Woodhouse2c733a12008-05-24 00:10:55 +01002938int t3_set_proto_sram(struct adapter *adap, const u8 *data)
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002939{
2940 int i;
David Woodhouse2c733a12008-05-24 00:10:55 +01002941 const __be32 *buf = (const __be32 *)data;
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002942
2943 for (i = 0; i < PROTO_SRAM_LINES; i++) {
Al Viro05e5c112007-12-22 18:56:23 +00002944 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2945 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2946 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2947 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2948 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002949
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002950 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2951 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2952 return -EIO;
2953 }
2954 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2955
2956 return 0;
2957}
2958
Divy Le Ray4d22de32007-01-18 22:04:14 -05002959void t3_config_trace_filter(struct adapter *adapter,
2960 const struct trace_params *tp, int filter_index,
2961 int invert, int enable)
2962{
2963 u32 addr, key[4], mask[4];
2964
2965 key[0] = tp->sport | (tp->sip << 16);
2966 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2967 key[2] = tp->dip;
2968 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2969
2970 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2971 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2972 mask[2] = tp->dip_mask;
2973 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2974
2975 if (invert)
2976 key[3] |= (1 << 29);
2977 if (enable)
2978 key[3] |= (1 << 28);
2979
2980 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2981 tp_wr_indirect(adapter, addr++, key[0]);
2982 tp_wr_indirect(adapter, addr++, mask[0]);
2983 tp_wr_indirect(adapter, addr++, key[1]);
2984 tp_wr_indirect(adapter, addr++, mask[1]);
2985 tp_wr_indirect(adapter, addr++, key[2]);
2986 tp_wr_indirect(adapter, addr++, mask[2]);
2987 tp_wr_indirect(adapter, addr++, key[3]);
2988 tp_wr_indirect(adapter, addr, mask[3]);
2989 t3_read_reg(adapter, A_TP_PIO_DATA);
2990}
2991
2992/**
2993 * t3_config_sched - configure a HW traffic scheduler
2994 * @adap: the adapter
2995 * @kbps: target rate in Kbps
2996 * @sched: the scheduler index
2997 *
 2998 * Configure a HW scheduler for the target rate.
2999 */
3000int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3001{
3002 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3003 unsigned int clk = adap->params.vpd.cclk * 1000;
3004 unsigned int selected_cpt = 0, selected_bpt = 0;
3005
3006 if (kbps > 0) {
3007 kbps *= 125; /* -> bytes */
3008 for (cpt = 1; cpt <= 255; cpt++) {
3009 tps = clk / cpt;
3010 bpt = (kbps + tps / 2) / tps;
3011 if (bpt > 0 && bpt <= 255) {
3012 v = bpt * tps;
3013 delta = v >= kbps ? v - kbps : kbps - v;
3014 if (delta <= mindelta) {
3015 mindelta = delta;
3016 selected_cpt = cpt;
3017 selected_bpt = bpt;
3018 }
3019 } else if (selected_cpt)
3020 break;
3021 }
3022 if (!selected_cpt)
3023 return -EINVAL;
3024 }
3025 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3026 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3027 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3028 if (sched & 1)
3029 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3030 else
3031 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3032 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3033 return 0;
3034}
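
/*
 * Worked example (hypothetical 200 MHz core clock, clk = 200000000): a
 * request of kbps = 100000 (100 Mbit/s) becomes 12500000 bytes/s.  At
 * cpt = 16 the scheduler ticks tps = 12500000 times per second, so
 * bpt = 1 byte per tick matches the target exactly (delta = 0); coarser
 * exact settings such as cpt = 160, bpt = 10 exist as well, and because
 * the comparison is "delta <= mindelta" the search keeps the last exact
 * match it encounters.
 */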
3035
3036static int tp_init(struct adapter *adap, const struct tp_params *p)
3037{
3038 int busy = 0;
3039
3040 tp_config(adap, p);
3041 t3_set_vlan_accel(adap, 3, 0);
3042
3043 if (is_offload(adap)) {
3044 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3045 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3046 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3047 0, 1000, 5);
3048 if (busy)
3049 CH_ERR(adap, "TP initialization timed out\n");
3050 }
3051
3052 if (!busy)
3053 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3054 return busy;
3055}
3056
3057int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3058{
3059 if (port_mask & ~((1 << adap->params.nports) - 1))
3060 return -EINVAL;
3061 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3062 port_mask << S_PORT0ACTIVE);
3063 return 0;
3064}
3065
3066/*
3067 * Perform the bits of HW initialization that are dependent on the number
3068 * of available ports.
3069 */
3070static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3071{
3072 int i;
3073
3074 if (nports == 1) {
3075 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3076 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3077 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3078 F_PORT0ACTIVE | F_ENFORCEPKT);
Divy Le Ray8a9fab22007-05-30 21:10:52 -07003079 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003080 } else {
3081 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3082 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3083 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3084 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3085 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3086 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3087 F_ENFORCEPKT);
3088 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3089 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3090 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3091 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3092 for (i = 0; i < 16; i++)
3093 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3094 (i << 16) | 0x1010);
3095 }
3096}
3097
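/*
 * Run impedance calibration for the MAC interface.  XAUI ports trigger the
 * on-chip calibration engine and poll for completion (up to 5 attempts);
 * RGMII ports simply get fixed pull-up/pull-down impedance values.
 * Returns 0 on success, -1 if XAUI calibration fails.
 */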
3098static int calibrate_xgm(struct adapter *adapter)
3099{
3100 if (uses_xaui(adapter)) {
3101 unsigned int v, i;
3102
3103 for (i = 0; i < 5; ++i) {
3104 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3105 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3106 msleep(1);
3107 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3108 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3109 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3110 V_XAUIIMP(G_CALIMP(v) >> 2));
3111 return 0;
3112 }
3113 }
3114 CH_ERR(adapter, "MAC calibration failed\n");
3115 return -1;
3116 } else {
3117 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3118 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3119 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3120 F_XGM_IMPSETUPDATE);
3121 }
3122 return 0;
3123}
3124
3125static void calibrate_xgm_t3b(struct adapter *adapter)
3126{
3127 if (!uses_xaui(adapter)) {
3128 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3129 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3130 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3131 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3132 F_XGM_IMPSETUPDATE);
3133 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3134 0);
3135 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3136 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3137 }
3138}
3139
3140struct mc7_timing_params {
3141 unsigned char ActToPreDly;
3142 unsigned char ActToRdWrDly;
3143 unsigned char PreCyc;
3144 unsigned char RefCyc[5];
3145 unsigned char BkCyc;
3146 unsigned char WrToRdDly;
3147 unsigned char RdToWrDly;
3148};
3149
3150/*
3151 * Write a value to a register and check that the write completed. These
3152 * writes normally complete in a cycle or two, so one read should suffice.
3153 * The very first read exists to flush the posted write to the device.
3154 */
3155static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3156{
3157 t3_write_reg(adapter, addr, val);
3158 t3_read_reg(adapter, addr); /* flush */
3159 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3160 return 0;
3161 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3162 return -EIO;
3163}
3164
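/*
 * Bring up one of the MC7 memory controllers (PMRX, PMTX, or CM): enable
 * the interface, calibrate it unless the part runs in slow mode, program
 * the DRAM timing parameters selected by mem_type, issue the DRAM init
 * sequence (precharge, extended/normal mode registers, refreshes), set the
 * periodic refresh divider for the given clock, enable ECC, and run a BIST
 * pass over the whole array before allowing normal memory accesses.
 * Returns 0 on success, -1 on any failure.
 */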
3165static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3166{
3167 static const unsigned int mc7_mode[] = {
3168 0x632, 0x642, 0x652, 0x432, 0x442
3169 };
3170 static const struct mc7_timing_params mc7_timings[] = {
3171 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3172 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3173 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3174 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3175 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3176 };
3177
3178 u32 val;
3179 unsigned int width, density, slow, attempts;
3180 struct adapter *adapter = mc7->adapter;
3181 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3182
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003183 if (!mc7->size)
3184 return 0;
3185
Divy Le Ray4d22de32007-01-18 22:04:14 -05003186 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3187 slow = val & F_SLOW;
3188 width = G_WIDTH(val);
3189 density = G_DEN(val);
3190
3191 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3192 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3193 msleep(1);
3194
3195 if (!slow) {
3196 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3197 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3198 msleep(1);
3199 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3200 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3201 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3202 mc7->name);
3203 goto out_fail;
3204 }
3205 }
3206
3207 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3208 V_ACTTOPREDLY(p->ActToPreDly) |
3209 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3210 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3211 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3212
3213 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3214 val | F_CLKEN | F_TERM150);
3215 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3216
3217 if (!slow)
3218 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3219 F_DLLENB);
3220 udelay(1);
3221
3222 val = slow ? 3 : 6;
3223 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3224 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3225 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3226 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3227 goto out_fail;
3228
3229 if (!slow) {
3230 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3231 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3232 udelay(5);
3233 }
3234
3235 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3236 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3237 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3238 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3239 mc7_mode[mem_type]) ||
3240 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3241 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3242 goto out_fail;
3243
3244 /* mc7_clock is in KHz: compute clock cycles per 7812.5 ns refresh period */
3245 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* KHz * 7812.5 ns */
3246 mc7_clock /= 1000000; /* (KHz * ns) / 1e6 -> clock cycles */
3247
3248 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3249 F_PERREFEN | V_PREREFDIV(mc7_clock));
3250 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3251
3252 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3253 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3254 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3255 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3256 (mc7->size << width) - 1);
3257 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3258 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3259
3260 attempts = 50;
3261 do {
3262 msleep(250);
3263 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3264 } while ((val & F_BUSY) && --attempts);
3265 if (val & F_BUSY) {
3266 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3267 goto out_fail;
3268 }
3269
3270 /* Enable normal memory accesses. */
3271 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3272 return 0;
3273
3274out_fail:
3275 return -1;
3276}
3277
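/*
 * Tune the PCIe link.  The ACK latency and replay timer limits are looked
 * up by log2(link width) and max payload size, with an extra allowance for
 * fast training sequences when ASPM L0s is enabled, and the link-down
 * reset and error-handling bits are then switched on.
 */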
3278static void config_pcie(struct adapter *adap)
3279{
3280 static const u16 ack_lat[4][6] = {
3281 {237, 416, 559, 1071, 2095, 4143},
3282 {128, 217, 289, 545, 1057, 2081},
3283 {73, 118, 154, 282, 538, 1050},
3284 {67, 107, 86, 150, 278, 534}
3285 };
3286 static const u16 rpl_tmr[4][6] = {
3287 {711, 1248, 1677, 3213, 6285, 12429},
3288 {384, 651, 867, 1635, 3171, 6243},
3289 {219, 354, 462, 846, 1614, 3150},
3290 {201, 321, 258, 450, 834, 1602}
3291 };
3292
3293 u16 val;
3294 unsigned int log2_width, pldsize;
3295 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3296
3297 pci_read_config_word(adap->pdev,
3298 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3299 &val);
3300 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3301 pci_read_config_word(adap->pdev,
3302 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3303 &val);
3304
3305 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3306 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3307 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3308 log2_width = fls(adap->params.pci.width) - 1;
3309 acklat = ack_lat[log2_width][pldsize];
3310 if (val & 1) /* ASPM L0s enabled? */
3311 acklat += fst_trn_tx * 4;
3312 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3313
3314 if (adap->params.rev == 0)
3315 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3316 V_T3A_ACKLAT(M_T3A_ACKLAT),
3317 V_T3A_ACKLAT(acklat));
3318 else
3319 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3320 V_ACKLAT(acklat));
3321
3322 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3323 V_REPLAYLMT(rpllmt));
3324
3325 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
Divy Le Rayb8819552007-12-17 18:47:31 -08003326 t3_set_reg_field(adap, A_PCIE_CFG, 0,
Divy Le Ray204e2f92008-05-06 19:26:01 -07003327 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
Divy Le Rayb8819552007-12-17 18:47:31 -08003328 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003329}
3330
3331/*
3332 * Initialize and configure T3 HW modules. This performs the
3333 * initialization steps that need to be done once after a card is reset.
3334 * MAC and PHY initialization is handled separately whenever a port is enabled.
3335 *
3336 * fw_params are passed to FW and their value is platform dependent. Only the
3337 * top 8 bits are available for use, the rest must be 0.
3338 */
3339int t3_init_hw(struct adapter *adapter, u32 fw_params)
3340{
Divy Le Rayb8819552007-12-17 18:47:31 -08003341 int err = -EIO, attempts, i;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003342 const struct vpd_params *vpd = &adapter->params.vpd;
3343
3344 if (adapter->params.rev > 0)
3345 calibrate_xgm_t3b(adapter);
3346 else if (calibrate_xgm(adapter))
3347 goto out_err;
3348
3349 if (vpd->mclk) {
3350 partition_mem(adapter, &adapter->params.tp);
3351
3352 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3353 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3354 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3355 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3356 adapter->params.mc5.nfilters,
3357 adapter->params.mc5.nroutes))
3358 goto out_err;
Divy Le Rayb8819552007-12-17 18:47:31 -08003359
3360 for (i = 0; i < 32; i++)
3361 if (clear_sge_ctxt(adapter, i, F_CQ))
3362 goto out_err;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003363 }
3364
3365 if (tp_init(adapter, &adapter->params.tp))
3366 goto out_err;
3367
3368 t3_tp_set_coalescing_size(adapter,
3369 min(adapter->params.sge.max_pkt_size,
3370 MAX_RX_COALESCING_LEN), 1);
3371 t3_tp_set_max_rxsize(adapter,
3372 min(adapter->params.sge.max_pkt_size, 16384U));
3373 ulp_config(adapter, &adapter->params.tp);
3374
3375 if (is_pcie(adapter))
3376 config_pcie(adapter);
3377 else
Divy Le Rayb8819552007-12-17 18:47:31 -08003378 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3379 F_DMASTOPEN | F_CLIDECEN);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003380
Divy Le Raya2604be2007-11-16 11:22:16 -08003381 if (adapter->params.rev == T3_REV_C)
3382 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3383 F_CFG_CQE_SOP_MASK);
3384
Divy Le Ray8a9fab22007-05-30 21:10:52 -07003385 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
Divy Le Ray3f61e422007-08-21 20:49:41 -07003386 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3387 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003388 init_hw_for_avail_ports(adapter, adapter->params.nports);
3389 t3_sge_init(adapter, &adapter->params.sge);
3390
Divy Le Rayf231e0a2008-10-08 17:39:00 -07003391 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3392
Divy Le Ray4d22de32007-01-18 22:04:14 -05003393 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3394 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3395 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3396 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3397
Divy Le Rayb8819552007-12-17 18:47:31 -08003398 attempts = 100;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003399 do { /* wait for uP to initialize */
3400 msleep(20);
3401 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003402 if (!attempts) {
3403 CH_ERR(adapter, "uP initialization timed out\n");
Divy Le Ray4d22de32007-01-18 22:04:14 -05003404 goto out_err;
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003405 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05003406
3407 err = 0;
3408out_err:
3409 return err;
3410}
3411
3412/**
3413 * get_pci_mode - determine a card's PCI mode
3414 * @adapter: the adapter
3415 * @p: where to store the PCI settings
3416 *
3417 * Determines a card's PCI mode and associated parameters, such as speed
3418 * and width.
3419 */
Roland Dreier7b9b0942008-01-29 14:45:11 -08003420static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003421{
3422 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3423 u32 pci_mode, pcie_cap;
3424
3425 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3426 if (pcie_cap) {
3427 u16 val;
3428
3429 p->variant = PCI_VARIANT_PCIE;
3430 p->pcie_cap_addr = pcie_cap;
3431 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3432 &val);
3433 p->width = (val >> 4) & 0x3f;
3434 return;
3435 }
3436
3437 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3438 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3439 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3440 pci_mode = G_PCIXINITPAT(pci_mode);
3441 if (pci_mode == 0)
3442 p->variant = PCI_VARIANT_PCI;
3443 else if (pci_mode < 4)
3444 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3445 else if (pci_mode < 8)
3446 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3447 else
3448 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3449}
3450
3451/**
3452 * init_link_config - initialize a link's SW state
3453 * @lc: structure holding the link state
3454 * @ai: information about the current card
3455 *
3456 * Initializes the SW state maintained for each link, including the link's
3457 * capabilities and default speed/duplex/flow-control/autonegotiation
3458 * settings.
3459 */
Roland Dreier7b9b0942008-01-29 14:45:11 -08003460static void init_link_config(struct link_config *lc, unsigned int caps)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003461{
3462 lc->supported = caps;
3463 lc->requested_speed = lc->speed = SPEED_INVALID;
3464 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3465 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3466 if (lc->supported & SUPPORTED_Autoneg) {
3467 lc->advertising = lc->supported;
3468 lc->autoneg = AUTONEG_ENABLE;
3469 lc->requested_fc |= PAUSE_AUTONEG;
3470 } else {
3471 lc->advertising = 0;
3472 lc->autoneg = AUTONEG_DISABLE;
3473 }
3474}
3475
3476/**
3477 * mc7_calc_size - calculate MC7 memory size
3478 * @cfg: the MC7 configuration
3479 *
3480 * Calculates the size of an MC7 memory in bytes from the value of its
3481 * configuration register.
3482 */
Roland Dreier7b9b0942008-01-29 14:45:11 -08003483static unsigned int mc7_calc_size(u32 cfg)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003484{
3485 unsigned int width = G_WIDTH(cfg);
3486 unsigned int banks = !!(cfg & F_BKS) + 1;
3487 unsigned int org = !!(cfg & F_ORG) + 1;
3488 unsigned int density = G_DEN(cfg);
3489 unsigned int MBs = ((256 << density) * banks) / (org << width);
3490
3491 return MBs << 20;
3492}
3493
Roland Dreier7b9b0942008-01-29 14:45:11 -08003494static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3495 unsigned int base_addr, const char *name)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003496{
3497 u32 cfg;
3498
3499 mc7->adapter = adapter;
3500 mc7->name = name;
3501 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3502 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003503 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003504 mc7->width = G_WIDTH(cfg);
3505}
3506
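/*
 * Initialize the SW state of one MAC.  On rev-0 XAUI adapters this also
 * programs the SerDes control register and disables RGMII on that port.
 */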
3507void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3508{
3509 mac->adapter = adapter;
3510 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3511 mac->nucast = 1;
3512
3513 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3514 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3515 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3516 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3517 F_ENRGMII, 0);
3518 }
3519}
3520
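/*
 * One-time early hardware setup: initialize the MDIO interface, set the
 * I2C clock divider for 80 KHz operation, program the GPIO outputs, and
 * enable the MAC clocks so the XGMAC registers become accessible.
 */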
3521void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3522{
3523 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3524
3525 mi1_init(adapter, ai);
3526 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3527 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3528 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3529 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003530 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
Divy Le Rayb8819552007-12-17 18:47:31 -08003531 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
Divy Le Ray4d22de32007-01-18 22:04:14 -05003532
3533 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3534 val |= F_ENRGMII;
3535
3536 /* Enable MAC clocks so we can access the registers */
3537 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3538 t3_read_reg(adapter, A_XGM_PORT_CFG);
3539
3540 val |= F_CLKDIVRESET_;
3541 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3542 t3_read_reg(adapter, A_XGM_PORT_CFG);
3543 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3544 t3_read_reg(adapter, A_XGM_PORT_CFG);
3545}
3546
3547/*
Jeff Garzik2eab17a2007-11-23 21:59:45 -05003548 * Reset the adapter.
Divy Le Raye4d08352007-03-18 13:10:17 -07003549 * Older PCIe cards lose their config space during reset; PCI-X
Divy Le Ray4d22de32007-01-18 22:04:14 -05003550 * ones don't.
3551 */
Divy Le Ray20d3fc12008-10-08 17:36:03 -07003552int t3_reset_adapter(struct adapter *adapter)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003553{
Jeff Garzik2eab17a2007-11-23 21:59:45 -05003554 int i, save_and_restore_pcie =
Divy Le Raye4d08352007-03-18 13:10:17 -07003555 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003556 uint16_t devid = 0;
3557
Divy Le Raye4d08352007-03-18 13:10:17 -07003558 if (save_and_restore_pcie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003559 pci_save_state(adapter->pdev);
3560 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3561
3562 /*
3563 * Delay: give the device some time to reset fully.
3564 * XXX The delay time should be tuned.
3565 */
3566 for (i = 0; i < 10; i++) {
3567 msleep(50);
3568 pci_read_config_word(adapter->pdev, 0x00, &devid);
3569 if (devid == 0x1425)
3570 break;
3571 }
3572
3573 if (devid != 0x1425)
3574 return -1;
3575
Divy Le Raye4d08352007-03-18 13:10:17 -07003576 if (save_and_restore_pcie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003577 pci_restore_state(adapter->pdev);
3578 return 0;
3579}
3580
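/*
 * Initialize parity-covered on-chip memories to a known state: clear the
 * SGE egress and response-queue contexts and zero the CIM IBQ debug
 * memory.  Returns -EBUSY if the SGE context command interface is busy,
 * a non-zero error if any clear operation fails, and 0 otherwise.
 */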
Roland Dreier7b9b0942008-01-29 14:45:11 -08003581static int init_parity(struct adapter *adap)
Divy Le Rayb8819552007-12-17 18:47:31 -08003582{
3583 int i, err, addr;
3584
3585 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3586 return -EBUSY;
3587
3588 for (err = i = 0; !err && i < 16; i++)
3589 err = clear_sge_ctxt(adap, i, F_EGRESS);
3590 for (i = 0xfff0; !err && i <= 0xffff; i++)
3591 err = clear_sge_ctxt(adap, i, F_EGRESS);
3592 for (i = 0; !err && i < SGE_QSETS; i++)
3593 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3594 if (err)
3595 return err;
3596
3597 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3598 for (i = 0; i < 4; i++)
3599 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3600 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3601 F_IBQDBGWR | V_IBQDBGQID(i) |
3602 V_IBQDBGADDR(addr));
3603 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3604 F_IBQDBGBUSY, 0, 2, 1);
3605 if (err)
3606 return err;
3607 }
3608 return 0;
3609}
3610
Divy Le Ray4d22de32007-01-18 22:04:14 -05003611/*
3612 * Initialize adapter SW state for the various HW modules, set initial values
3613 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3614 * interface.
3615 */
Roland Dreier7b9b0942008-01-29 14:45:11 -08003616int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3617 int reset)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003618{
3619 int ret;
Divy Le Ray04497982008-10-08 17:38:29 -07003620 unsigned int i, j = -1;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003621
3622 get_pci_mode(adapter, &adapter->params.pci);
3623
3624 adapter->params.info = ai;
3625 adapter->params.nports = ai->nports;
3626 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
Divy Le Rayfc8821962009-03-12 21:14:09 +00003627 /*
3628 * We used to only run the "adapter check task" once a second if
3629 * we had PHYs which didn't support interrupts (we would check
3630 * their link status once a second). Now we check other conditions
3631 * in that routine which could potentially impose a very high
3632 * interrupt load on the system. As such, we now always scan the
3633 * adapter state once a second ...
3634 */
3635 adapter->params.linkpoll_period = 10;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003636 adapter->params.stats_update_period = is_10G(adapter) ?
3637 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3638 adapter->params.pci.vpd_cap_addr =
3639 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3640 ret = get_vpd_params(adapter, &adapter->params.vpd);
3641 if (ret < 0)
3642 return ret;
3643
3644 if (reset && t3_reset_adapter(adapter))
3645 return -1;
3646
3647 t3_sge_prep(adapter, &adapter->params.sge);
3648
3649 if (adapter->params.vpd.mclk) {
3650 struct tp_params *p = &adapter->params.tp;
3651
3652 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3653 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3654 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3655
3656 p->nchan = ai->nports;
3657 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3658 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3659 p->cm_size = t3_mc7_size(&adapter->cm);
3660 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3661 p->chan_tx_size = p->pmtx_size / p->nchan;
3662 p->rx_pg_size = 64 * 1024;
3663 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3664 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3665 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3666 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3667 adapter->params.rev > 0 ? 12 : 6;
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003668 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05003669
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003670 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3671 t3_mc7_size(&adapter->pmtx) &&
3672 t3_mc7_size(&adapter->cm);
3673
3674 if (is_offload(adapter)) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05003675 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3676 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3677 DEFAULT_NFILTERS : 0;
3678 adapter->params.mc5.nroutes = 0;
3679 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3680
3681 init_mtus(adapter->params.mtus);
3682 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3683 }
3684
3685 early_hw_init(adapter, ai);
Divy Le Rayb8819552007-12-17 18:47:31 -08003686 ret = init_parity(adapter);
3687 if (ret)
3688 return ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003689
3690 for_each_port(adapter, i) {
3691 u8 hw_addr[6];
Divy Le Ray04497982008-10-08 17:38:29 -07003692 const struct port_type_info *pti;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003693 struct port_info *p = adap2pinfo(adapter, i);
3694
Divy Le Ray04497982008-10-08 17:38:29 -07003695 while (!adapter->params.vpd.port_type[++j])
3696 ;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003697
Divy Le Ray04497982008-10-08 17:38:29 -07003698 pti = &port_types[adapter->params.vpd.port_type[j]];
Divy Le Ray9f643062008-11-09 00:55:28 -08003699 if (!pti->phy_prep) {
3700 CH_ALERT(adapter, "Invalid port type index %d\n",
3701 adapter->params.vpd.port_type[j]);
3702 return -EINVAL;
3703 }
3704
Divy Le Ray04497982008-10-08 17:38:29 -07003705 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3706 ai->mdio_ops);
Divy Le Ray78e46892008-10-08 17:38:01 -07003707 if (ret)
3708 return ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -05003709 mac_prep(&p->mac, adapter, j);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003710
3711 /*
3712 * The VPD EEPROM stores the base Ethernet address for the
3713 * card. A port's address is derived from the base by adding
3714 * the port's index to the base's low octet.
3715 */
3716 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3717 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3718
3719 memcpy(adapter->port[i]->dev_addr, hw_addr,
3720 ETH_ALEN);
3721 memcpy(adapter->port[i]->perm_addr, hw_addr,
3722 ETH_ALEN);
Divy Le Ray04497982008-10-08 17:38:29 -07003723 init_link_config(&p->link_config, p->phy.caps);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003724 p->phy.ops->power_down(&p->phy, 1);
Divy Le Rayfc8821962009-03-12 21:14:09 +00003725
3726 /*
3727 * If the PHY doesn't support interrupts for link status
3728 * changes, schedule a scan of the adapter links at least
3729 * once a second.
3730 */
3731 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3732 adapter->params.linkpoll_period > 10)
Divy Le Ray4d22de32007-01-18 22:04:14 -05003733 adapter->params.linkpoll_period = 10;
3734 }
3735
3736 return 0;
3737}
3738
3739void t3_led_ready(struct adapter *adapter)
3740{
3741 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3742 F_GPIO0_OUT_VAL);
3743}
Divy Le Ray204e2f92008-05-06 19:26:01 -07003744
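/*
 * Re-run the subset of t3_prep_adapter needed when the adapter is reset
 * while the driver's software state is retained: early HW init, parity
 * initialization, and per-port PHY preparation and power-down.
 */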
3745int t3_replay_prep_adapter(struct adapter *adapter)
3746{
3747 const struct adapter_info *ai = adapter->params.info;
Divy Le Ray04497982008-10-08 17:38:29 -07003748 unsigned int i, j = -1;
Divy Le Ray204e2f92008-05-06 19:26:01 -07003749 int ret;
3750
3751 early_hw_init(adapter, ai);
3752 ret = init_parity(adapter);
3753 if (ret)
3754 return ret;
3755
3756 for_each_port(adapter, i) {
Divy Le Ray04497982008-10-08 17:38:29 -07003757 const struct port_type_info *pti;
Divy Le Ray204e2f92008-05-06 19:26:01 -07003758 struct port_info *p = adap2pinfo(adapter, i);
Divy Le Ray204e2f92008-05-06 19:26:01 -07003759
Divy Le Ray04497982008-10-08 17:38:29 -07003760 while (!adapter->params.vpd.port_type[++j])
3761 ;
3762
3763 pti = &port_types[adapter->params.vpd.port_type[j]];
3764 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
Divy Le Ray78e46892008-10-08 17:38:01 -07003765 if (ret)
3766 return ret;
Divy Le Ray204e2f92008-05-06 19:26:01 -07003767 p->phy.ops->power_down(&p->phy, 1);
Divy Le Ray204e2f92008-05-06 19:26:01 -07003768 }
3769
3770 return 0;
3771}
3772