blob: d8a20f514f80fe92e9ae92dc689043c5512d0325 [file] [log] [blame]
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
Ben Hutchings0a6f40c2011-02-25 00:01:34 +00003 * Copyright 2008-2011 Solarflare Communications Inc.
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
Ben Hutchings251111d2013-08-27 23:04:29 +010011#include <asm/cmpxchg.h>
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000012#include "net_driver.h"
13#include "nic.h"
14#include "io.h"
Ben Hutchings8b8a95a2012-09-18 01:57:07 +010015#include "farch_regs.h"
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000016#include "mcdi_pcol.h"
17#include "phy.h"
18
19/**************************************************************************
20 *
21 * Management-Controller-to-Driver Interface
22 *
23 **************************************************************************
24 */
25
/* Maximum time to wait for an MCDI request to complete, in jiffies */
#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 20ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		200
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

/* Mask for the sequence-number field of the MCDI request header */
#define SEQ_MASK \
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
39
/* State for one MCDI request issued asynchronously via
 * efx_mcdi_rpc_async().  Entries are queued on the interface's
 * async_list; the request parameters (and, on completion, the
 * response) live in the buffer that immediately follows this
 * structure in the same allocation.
 */
struct efx_mcdi_async_param {
	struct list_head list;			/* entry in mcdi->async_list */
	unsigned int cmd;			/* MCDI command type number */
	size_t inlen;				/* length of request parameters */
	size_t outlen;				/* space reserved for the response */
	efx_mcdi_async_completer *complete;	/* completion callback */
	unsigned long cookie;			/* opaque value for @complete */
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
Ben Hutchingscade7152013-08-27 23:12:31 +010053
/* Return the MCDI interface state; efx->mcdi must already be allocated */
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}
59
Ben Hutchingsf073dde2012-09-18 02:33:55 +010060int efx_mcdi_init(struct efx_nic *efx)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000061{
62 struct efx_mcdi_iface *mcdi;
Ben Hutchings4c75b432013-08-29 19:04:03 +010063 bool already_attached;
64 int rc;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000065
Ben Hutchingsf3ad5002012-09-18 02:33:56 +010066 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
67 if (!efx->mcdi)
68 return -ENOMEM;
69
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000070 mcdi = efx_mcdi(efx);
Ben Hutchingscade7152013-08-27 23:12:31 +010071 mcdi->efx = efx;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000072 init_waitqueue_head(&mcdi->wq);
73 spin_lock_init(&mcdi->iface_lock);
Ben Hutchings251111d2013-08-27 23:04:29 +010074 mcdi->state = MCDI_STATE_QUIESCENT;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000075 mcdi->mode = MCDI_MODE_POLL;
Ben Hutchingscade7152013-08-27 23:12:31 +010076 spin_lock_init(&mcdi->async_lock);
77 INIT_LIST_HEAD(&mcdi->async_list);
78 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
79 (unsigned long)mcdi);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +000080
81 (void) efx_mcdi_poll_reboot(efx);
Daniel Pieczkod36a08b2013-06-20 11:40:07 +010082 mcdi->new_epoch = true;
Ben Hutchingsf073dde2012-09-18 02:33:55 +010083
84 /* Recover from a failed assertion before probing */
Ben Hutchings4c75b432013-08-29 19:04:03 +010085 rc = efx_mcdi_handle_assertion(efx);
86 if (rc)
87 return rc;
88
89 /* Let the MC (and BMC, if this is a LOM) know that the driver
90 * is loaded. We should do this before we reset the NIC.
91 */
92 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
93 if (rc) {
94 netif_err(efx, probe, efx->net_dev,
95 "Unable to register driver with MCPU\n");
96 return rc;
97 }
98 if (already_attached)
99 /* Not a fatal error */
100 netif_err(efx, probe, efx->net_dev,
101 "Host already registered with MCPU\n");
102
103 return 0;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000104}
105
/* Detach the driver from the MC and free MCDI state.  The interface
 * must be idle (QUIESCENT) when this is called.
 */
void efx_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);

	kfree(efx->mcdi);
}
118
/* Build the MCDI v1 or v2 request header for @cmd and copy the request
 * out to the MC.  The caller must already own the interface (state is
 * not QUIESCENT).
 */
static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: single header dword carries command and length */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2: extended command and length go in a second dword */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	/* The first request after a reboot has now been posted */
	mcdi->new_epoch = false;
}
171
Ben Hutchings5bc283e2012-10-08 21:43:00 +0100172static int efx_mcdi_errno(unsigned int mcdi_err)
173{
174 switch (mcdi_err) {
175 case 0:
176 return 0;
177#define TRANSLATE_ERROR(name) \
178 case MC_CMD_ERR_ ## name: \
179 return -name;
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100180 TRANSLATE_ERROR(EPERM);
Ben Hutchings5bc283e2012-10-08 21:43:00 +0100181 TRANSLATE_ERROR(ENOENT);
182 TRANSLATE_ERROR(EINTR);
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100183 TRANSLATE_ERROR(EAGAIN);
Ben Hutchings5bc283e2012-10-08 21:43:00 +0100184 TRANSLATE_ERROR(EACCES);
185 TRANSLATE_ERROR(EBUSY);
186 TRANSLATE_ERROR(EINVAL);
187 TRANSLATE_ERROR(EDEADLK);
188 TRANSLATE_ERROR(ENOSYS);
189 TRANSLATE_ERROR(ETIME);
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100190 TRANSLATE_ERROR(EALREADY);
191 TRANSLATE_ERROR(ENOSPC);
Ben Hutchings5bc283e2012-10-08 21:43:00 +0100192#undef TRANSLATE_ERROR
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100193 case MC_CMD_ERR_ALLOC_FAIL:
194 return -ENOBUFS;
195 case MC_CMD_ERR_MAC_EXIST:
196 return -EADDRINUSE;
Ben Hutchings5bc283e2012-10-08 21:43:00 +0100197 default:
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100198 return -EPROTO;
199 }
200}
201
/* Read and validate the response header now in shared memory, setting
 * mcdi->resprc, resp_hdr_len and resp_data_len.  Caller must hold
 * mcdi->iface_lock.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		/* MCDI v1: length is in the first header dword */
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* MCDI v2: actual length is in the second header dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		/* An error with no payload indicates an MC reboot */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		/* Response is not for the request we last sent */
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* First payload dword carries the MCDI error code */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
239
/* Busy-wait/sleep-poll for completion of the current request, up to
 * MCDI_RPC_TIMEOUT.  Returns 0 when a response (or reboot, reported
 * via mcdi->resprc) is available, or -ETIMEDOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* A reboot terminates the request; report it as the result */
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();	/* order poll result vs. subsequent response reads */
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
290
Ben Hutchings876be082012-10-01 20:58:35 +0100291/* Test and clear MC-rebooted flag for this port/function; reset
292 * software state as necessary.
293 */
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000294int efx_mcdi_poll_reboot(struct efx_nic *efx)
295{
Ben Hutchingsf3ad5002012-09-18 02:33:56 +0100296 if (!efx->mcdi)
297 return 0;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000298
Ben Hutchingscd0ecc92012-12-14 21:52:56 +0000299 return efx->type->mcdi_poll_reboot(efx);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000300}
301
Ben Hutchingscade7152013-08-27 23:12:31 +0100302static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
303{
304 return cmpxchg(&mcdi->state,
305 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
306 MCDI_STATE_QUIESCENT;
307}
308
/* Acquire the interface for a synchronous request, sleeping until it
 * becomes free.
 */
static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}
319
/* Sleep until the current event-mode request completes, falling back to
 * polling if the interface was meanwhile switched to polled mode.
 * Returns 0 or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
341
Ben Hutchingscade7152013-08-27 23:12:31 +0100342/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
343 * requester. Return whether this was done. Does not take any locks.
344 */
345static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000346{
Ben Hutchingscade7152013-08-27 23:12:31 +0100347 if (cmpxchg(&mcdi->state,
348 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
349 MCDI_STATE_RUNNING_SYNC) {
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000350 wake_up(&mcdi->wq);
351 return true;
352 }
353
354 return false;
355}
356
/* Release the interface after a request completes.  In event mode, if
 * another asynchronous request is queued, hand the interface straight
 * to it instead of returning to QUIESCENT.
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			/* Start the queued request and arm its timeout */
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}
384
/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 *
 * @timeout is true when invoked from the async timeout timer, in
 * which case the request is abandoned rather than read back.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len;
	efx_dword_t *outbuf;
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer. In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	/* Copy out no more of the response than the caller asked for */
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	async->complete(efx, async->cookie, rc, outbuf, data_len);
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}
443
/* Handle an MCDI completion event.  @seqno, @datalen and @mcdi_err come
 * from the event; for MCDI v2 the response header in shared memory is
 * read instead, since v2 responses do not fit in an event.
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		/* Complete whichever kind of request was running */
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}
487
488static void efx_mcdi_timeout_async(unsigned long context)
489{
490 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
491
492 efx_mcdi_complete_async(mcdi, true);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000493}
494
Ben Hutchings2f4bcdc2013-08-22 22:06:09 +0100495static int
496efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
497{
498 if (efx->type->mcdi_max_ver < 0 ||
499 (efx->type->mcdi_max_ver < 2 &&
500 cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
501 return -EINVAL;
502
503 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
504 (efx->type->mcdi_max_ver < 2 &&
505 inlen > MCDI_CTL_SDU_LEN_MAX_V1))
506 return -EMSGSIZE;
507
508 return 0;
509}
510
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000511int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
Ben Hutchings9528b922012-09-14 17:31:41 +0100512 const efx_dword_t *inbuf, size_t inlen,
513 efx_dword_t *outbuf, size_t outlen,
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000514 size_t *outlen_actual)
515{
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100516 int rc;
517
518 rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
519 if (rc)
520 return rc;
Stuart Hodgsonc3cba722012-07-16 17:40:47 +0100521 return efx_mcdi_rpc_finish(efx, cmd, inlen,
522 outbuf, outlen, outlen_actual);
523}
524
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100525int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
526 const efx_dword_t *inbuf, size_t inlen)
Stuart Hodgsonc3cba722012-07-16 17:40:47 +0100527{
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000528 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
Ben Hutchings2f4bcdc2013-08-22 22:06:09 +0100529 int rc;
Stuart Hodgsonc3cba722012-07-16 17:40:47 +0100530
Ben Hutchings2f4bcdc2013-08-22 22:06:09 +0100531 rc = efx_mcdi_check_supported(efx, cmd, inlen);
532 if (rc)
533 return rc;
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100534
Ben Hutchingscade7152013-08-27 23:12:31 +0100535 efx_mcdi_acquire_sync(mcdi);
Ben Hutchings2f4bcdc2013-08-22 22:06:09 +0100536 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
Ben Hutchingsdf2cd8a2012-09-19 00:56:18 +0100537 return 0;
Stuart Hodgsonc3cba722012-07-16 17:40:47 +0100538}
539
/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context. It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 *
 * Return: 0 on success, or a negative errno.
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	/* One allocation holds the parameter struct plus a buffer big
	 * enough for either the request or the response
	 */
	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		/* Polled mode: asynchronous completion cannot happen */
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}
608
/* Wait for the request posted by efx_mcdi_rpc_start() to complete, copy
 * out up to @outlen bytes of response, then release the interface.
 * Returns 0 on success or a negative errno.
 */
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		/* resprc only ever holds 0 or a negative errno */
		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			/* MC rebooted/asserted: consume the status word
			 * and start a new epoch
			 */
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
675
/* Switch to polled MCDI completions. This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}
703
/* Flush any running or queued asynchronous requests, after event processing
 * is stopped.  Each queued request is completed with -ENETDOWN.
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in polling mode so no more requests can be queued */
	BUG_ON(mcdi->mode != MCDI_MODE_POLL);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock. If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}
742
/* Switch to event-based MCDI completions.  Serialises against any
 * request in progress by acquiring and then releasing the interface.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
766
/* Handle an MC reboot or assertion notification; @rc is the negative
 * errno with which to complete any outstanding synchronous request.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete() would deadlock). The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can do always is just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Report the reboot as the request's result */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;
	}

	spin_unlock(&mcdi->iface_lock);
}
817
/* Called from falcon_process_eventq for MCDI events */
/* Dispatch one MCDI event from the event queue.
 *
 * Decodes the event code and routes it to the appropriate handler:
 * command completions, link changes, sensor events, PTP events,
 * MC assertion/reboot notifications, and DMA error reports.
 */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	/* NOTE: "BADSSERT" (sic) is the firmware's spelling in mcdi_pcol.h */
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an event-mode MCDI request */
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		/* Function-level reset of a VF (SR-IOV) */
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		/* Datapath DMA error: unrecoverable without a reset */
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
883
884/**************************************************************************
885 *
886 * Specific request functions
887 *
888 **************************************************************************
889 */
890
Ben Hutchingse5f0fd22011-02-24 23:57:47 +0000891void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000892{
Ben Hutchings59cfc472012-09-14 17:30:10 +0100893 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000894 size_t outlength;
895 const __le16 *ver_words;
896 int rc;
897
898 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
899
900 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
901 outbuf, sizeof(outbuf), &outlength);
902 if (rc)
903 goto fail;
904
Ben Hutchings05a93202011-12-20 00:44:06 +0000905 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
Ben Hutchings00bbb4a2010-04-28 09:27:14 +0000906 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000907 goto fail;
908 }
909
910 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
Ben Hutchingse5f0fd22011-02-24 23:57:47 +0000911 snprintf(buf, len, "%u.%u.%u.%u",
912 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
913 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
914 return;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000915
916fail:
Ben Hutchings62776d02010-06-23 11:30:07 +0000917 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingse5f0fd22011-02-24 23:57:47 +0000918 buf[0] = 0;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000919}
920
Ben Hutchings4c75b432013-08-29 19:04:03 +0100921static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
922 bool *was_attached)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000923{
Ben Hutchings59cfc472012-09-14 17:30:10 +0100924 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
925 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000926 size_t outlen;
927 int rc;
928
929 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
930 driver_operating ? 1 : 0);
931 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
Ben Hutchingsf2b0bef2013-08-20 20:35:50 +0100932 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000933
934 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
935 outbuf, sizeof(outbuf), &outlen);
936 if (rc)
937 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +0000938 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
939 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000940 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +0000941 }
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000942
943 if (was_attached != NULL)
944 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
945 return 0;
946
947fail:
Ben Hutchings62776d02010-06-23 11:30:07 +0000948 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +0000949 return rc;
950}
951
/* Query the board configuration from the MC.
 *
 * @mac_address:	if non-NULL, receives this port's base MAC address
 *			(ETH_ALEN bytes)
 * @fw_subtype_list:	if non-NULL, receives the firmware subtype list;
 *			must have room for
 *			MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM
 *			entries (unreported entries are zeroed)
 * @capabilities:	if non-NULL, receives this port's capability mask
 *
 * Return: 0 on success or a negative errno.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	/* Port 0 and port 1 have separate MAC address/capability fields */
	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		/* Copy what the MC returned; zero-fill the remainder so the
		 * caller always sees MAXNUM valid entries.
		 */
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}
1005
1006int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
1007{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001008 MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001009 u32 dest = 0;
1010 int rc;
1011
1012 if (uart)
1013 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
1014 if (evq)
1015 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
1016
1017 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
1018 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
1019
1020 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
1021
1022 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
1023 NULL, 0, NULL);
1024 if (rc)
1025 goto fail;
1026
1027 return 0;
1028
1029fail:
Ben Hutchings62776d02010-06-23 11:30:07 +00001030 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001031 return rc;
1032}
1033
1034int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
1035{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001036 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001037 size_t outlen;
1038 int rc;
1039
1040 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
1041
1042 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
1043 outbuf, sizeof(outbuf), &outlen);
1044 if (rc)
1045 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001046 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
1047 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001048 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001049 }
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001050
1051 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
1052 return 0;
1053
1054fail:
Ben Hutchings62776d02010-06-23 11:30:07 +00001055 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1056 __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001057 return rc;
1058}
1059
1060int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
1061 size_t *size_out, size_t *erase_size_out,
1062 bool *protected_out)
1063{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001064 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
1065 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001066 size_t outlen;
1067 int rc;
1068
1069 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
1070
1071 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
1072 outbuf, sizeof(outbuf), &outlen);
1073 if (rc)
1074 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001075 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
1076 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001077 goto fail;
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001078 }
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001079
1080 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
1081 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
1082 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
Ben Hutchings05a93202011-12-20 00:44:06 +00001083 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001084 return 0;
1085
1086fail:
Ben Hutchings62776d02010-06-23 11:30:07 +00001087 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001088 return rc;
1089}
1090
Ben Hutchings2e803402010-02-03 09:31:01 +00001091static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
1092{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001093 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
1094 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
Ben Hutchings2e803402010-02-03 09:31:01 +00001095 int rc;
1096
1097 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
1098
1099 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
1100 outbuf, sizeof(outbuf), NULL);
1101 if (rc)
1102 return rc;
1103
1104 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
1105 case MC_CMD_NVRAM_TEST_PASS:
1106 case MC_CMD_NVRAM_TEST_NOTSUPP:
1107 return 0;
1108 default:
1109 return -EIO;
1110 }
1111}
1112
1113int efx_mcdi_nvram_test_all(struct efx_nic *efx)
1114{
1115 u32 nvram_types;
1116 unsigned int type;
1117 int rc;
1118
1119 rc = efx_mcdi_nvram_types(efx, &nvram_types);
1120 if (rc)
Ben Hutchingsb548a982010-04-28 09:28:36 +00001121 goto fail1;
Ben Hutchings2e803402010-02-03 09:31:01 +00001122
1123 type = 0;
1124 while (nvram_types != 0) {
1125 if (nvram_types & 1) {
1126 rc = efx_mcdi_nvram_test(efx, type);
1127 if (rc)
Ben Hutchingsb548a982010-04-28 09:28:36 +00001128 goto fail2;
Ben Hutchings2e803402010-02-03 09:31:01 +00001129 }
1130 type++;
1131 nvram_types >>= 1;
1132 }
1133
1134 return 0;
Ben Hutchingsb548a982010-04-28 09:28:36 +00001135
1136fail2:
Ben Hutchings62776d02010-06-23 11:30:07 +00001137 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
1138 __func__, type);
Ben Hutchingsb548a982010-04-28 09:28:36 +00001139fail1:
Ben Hutchings62776d02010-06-23 11:30:07 +00001140 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsb548a982010-04-28 09:28:36 +00001141 return rc;
Ben Hutchings2e803402010-02-03 09:31:01 +00001142}
1143
/* Read and clear any stored MC assertion state, logging it if present.
 *
 * Return: 0 on success (whether or not an assertion was recorded) or a
 * negative errno.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Map the failure flag to a human-readable description */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
1200
Steve Hodgson8b2103a2010-02-03 09:30:17 +00001201static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1202{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001203 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
Steve Hodgson8b2103a2010-02-03 09:30:17 +00001204
Ben Hutchings0f1e54a2012-07-02 23:37:40 +01001205 /* If the MC is running debug firmware, it might now be
1206 * waiting for a debugger to attach, but we just want it to
1207 * reboot. We set a flag that makes the command a no-op if it
1208 * has already done so. We don't know what return code to
1209 * expect (0 or -EIO), so ignore it.
1210 */
Steve Hodgson8b2103a2010-02-03 09:30:17 +00001211 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1212 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1213 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
Ben Hutchings0f1e54a2012-07-02 23:37:40 +01001214 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1215 NULL, 0, NULL);
Steve Hodgson8b2103a2010-02-03 09:30:17 +00001216}
1217
/* Read out any stored MC assertion state and, if reading succeeded,
 * reboot the MC out of its assertion handler.
 *
 * Return: 0 on success or a negative errno from the read step.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc = efx_mcdi_read_assertion(efx);

	if (!rc)
		efx_mcdi_exit_assertion(efx);
	return rc;
}
1230
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001231void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1232{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001233 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001234 int rc;
1235
1236 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
1237 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
1238 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
1239
1240 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
1241
1242 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
1243
1244 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
1245 NULL, 0, NULL);
1246 if (rc)
Ben Hutchings62776d02010-06-23 11:30:07 +00001247 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1248 __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001249}
1250
Ben Hutchings6bff8612012-09-18 02:33:52 +01001251static int efx_mcdi_reset_port(struct efx_nic *efx)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001252{
Ben Hutchings05a93202011-12-20 00:44:06 +00001253 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001254 if (rc)
Ben Hutchings62776d02010-06-23 11:30:07 +00001255 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1256 __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001257 return rc;
1258}
1259
Ben Hutchings6bff8612012-09-18 02:33:52 +01001260static int efx_mcdi_reset_mc(struct efx_nic *efx)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001261{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001262 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001263 int rc;
1264
1265 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1266 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
1267 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
1268 NULL, 0, NULL);
1269 /* White is black, and up is down */
1270 if (rc == -EIO)
1271 return 0;
1272 if (rc == 0)
1273 rc = -EIO;
Ben Hutchings62776d02010-06-23 11:30:07 +00001274 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001275 return rc;
1276}
1277
/* Map a reset reason to the reset type to schedule.
 *
 * For MCDI-based NICs every reason maps to RESET_TYPE_RECOVER_OR_ALL;
 * @reason is currently unused.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
1282
1283int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1284{
1285 int rc;
1286
1287 /* Recover from a failed assertion pre-reset */
1288 rc = efx_mcdi_handle_assertion(efx);
1289 if (rc)
1290 return rc;
1291
1292 if (method == RESET_TYPE_WORLD)
1293 return efx_mcdi_reset_mc(efx);
1294 else
1295 return efx_mcdi_reset_port(efx);
1296}
1297
stephen hemmingerd2156972010-10-18 05:27:31 +00001298static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1299 const u8 *mac, int *id_out)
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001300{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001301 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1302 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001303 size_t outlen;
1304 int rc;
1305
1306 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1307 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1308 MC_CMD_FILTER_MODE_SIMPLE);
1309 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1310
1311 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1312 outbuf, sizeof(outbuf), &outlen);
1313 if (rc)
1314 goto fail;
1315
1316 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001317 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001318 goto fail;
1319 }
1320
1321 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1322
1323 return 0;
1324
1325fail:
1326 *id_out = -1;
Ben Hutchings62776d02010-06-23 11:30:07 +00001327 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001328 return rc;
1329
1330}
1331
1332
/* Install a magic-packet wake-on-LAN filter for @mac.
 * Thin wrapper around efx_mcdi_wol_filter_set(); @id_out receives the
 * filter ID on success or -1 on failure.
 */
int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}
1338
1339
1340int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1341{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001342 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001343 size_t outlen;
1344 int rc;
1345
1346 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1347 outbuf, sizeof(outbuf), &outlen);
1348 if (rc)
1349 goto fail;
1350
1351 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
Ben Hutchings00bbb4a2010-04-28 09:27:14 +00001352 rc = -EIO;
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001353 goto fail;
1354 }
1355
1356 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1357
1358 return 0;
1359
1360fail:
1361 *id_out = -1;
Ben Hutchings62776d02010-06-23 11:30:07 +00001362 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001363 return rc;
1364}
1365
1366
1367int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1368{
Ben Hutchings59cfc472012-09-14 17:30:10 +01001369 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001370 int rc;
1371
1372 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1373
1374 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1375 NULL, 0, NULL);
1376 if (rc)
1377 goto fail;
1378
1379 return 0;
1380
1381fail:
Ben Hutchings62776d02010-06-23 11:30:07 +00001382 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001383 return rc;
1384}
1385
/* Flush all RX queues that have a flush pending.
 *
 * Gathers every RX queue marked flush_pending into a single
 * MC_CMD_FLUSH_RX_QUEUES request, clearing each queue's pending flag
 * and decrementing efx->rxq_flush_pending as it goes.
 *
 * Return: 0 on success or a negative errno from the RPC.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* The request buffer is sized for one entry per channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Only the entries actually filled in are sent to the MC */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001417
1418int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1419{
1420 int rc;
1421
1422 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1423 if (rc)
1424 goto fail;
1425
1426 return 0;
1427
1428fail:
Ben Hutchings62776d02010-06-23 11:30:07 +00001429 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Ben Hutchingsafd4aea2009-11-29 15:15:25 +00001430 return rc;
1431}
1432
Ben Hutchings45a3fd52012-11-28 04:38:14 +00001433#ifdef CONFIG_SFC_MTD
1434
1435#define EFX_MCDI_NVRAM_LEN_MAX 128
1436
1437static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
1438{
1439 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
1440 int rc;
1441
1442 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
1443
1444 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
1445
1446 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
1447 NULL, 0, NULL);
1448 if (rc)
1449 goto fail;
1450
1451 return 0;
1452
1453fail:
1454 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1455 return rc;
1456}
1457
1458static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
1459 loff_t offset, u8 *buffer, size_t length)
1460{
1461 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
1462 MCDI_DECLARE_BUF(outbuf,
1463 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1464 size_t outlen;
1465 int rc;
1466
1467 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
1468 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
1469 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
1470
1471 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
1472 outbuf, sizeof(outbuf), &outlen);
1473 if (rc)
1474 goto fail;
1475
1476 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
1477 return 0;
1478
1479fail:
1480 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1481 return rc;
1482}
1483
1484static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
1485 loff_t offset, const u8 *buffer, size_t length)
1486{
1487 MCDI_DECLARE_BUF(inbuf,
1488 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1489 int rc;
1490
1491 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
1492 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
1493 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
1494 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
1495
1496 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
1497
1498 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
1499 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
1500 NULL, 0, NULL);
1501 if (rc)
1502 goto fail;
1503
1504 return 0;
1505
1506fail:
1507 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1508 return rc;
1509}
1510
1511static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
1512 loff_t offset, size_t length)
1513{
1514 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
1515 int rc;
1516
1517 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
1518 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
1519 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
1520
1521 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
1522
1523 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
1524 NULL, 0, NULL);
1525 if (rc)
1526 goto fail;
1527
1528 return 0;
1529
1530fail:
1531 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1532 return rc;
1533}
1534
1535static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
1536{
1537 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
1538 int rc;
1539
1540 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
1541
1542 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
1543
1544 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
1545 NULL, 0, NULL);
1546 if (rc)
1547 goto fail;
1548
1549 return 0;
1550
1551fail:
1552 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1553 return rc;
1554}
1555
1556int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
1557 size_t len, size_t *retlen, u8 *buffer)
1558{
1559 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1560 struct efx_nic *efx = mtd->priv;
1561 loff_t offset = start;
1562 loff_t end = min_t(loff_t, start + len, mtd->size);
1563 size_t chunk;
1564 int rc = 0;
1565
1566 while (offset < end) {
1567 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1568 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
1569 buffer, chunk);
1570 if (rc)
1571 goto out;
1572 offset += chunk;
1573 buffer += chunk;
1574 }
1575out:
1576 *retlen = offset - start;
1577 return rc;
1578}
1579
/* MTD erase callback: erase the range [@start, @start + @len).
 *
 * Starts an NVRAM update transaction on first modification of the
 * partition (finished later by efx_mcdi_mtd_sync()).  The start offset
 * is rounded down to an erase-block boundary.
 * NOTE(review): the loop condition compares against the (unrounded)
 * clamped end, so the final erase may extend one block past @start+@len
 * — presumably callers pass block-aligned ranges; confirm.
 *
 * Return: 0 on success or a negative errno.
 */
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	/* Round down to the containing erase block */
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}
1609
1610int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
1611 size_t len, size_t *retlen, const u8 *buffer)
1612{
1613 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1614 struct efx_nic *efx = mtd->priv;
1615 loff_t offset = start;
1616 loff_t end = min_t(loff_t, start + len, mtd->size);
1617 size_t chunk;
1618 int rc = 0;
1619
1620 if (!part->updating) {
1621 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
1622 if (rc)
1623 goto out;
1624 part->updating = true;
1625 }
1626
1627 while (offset < end) {
1628 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1629 rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
1630 buffer, chunk);
1631 if (rc)
1632 goto out;
1633 offset += chunk;
1634 buffer += chunk;
1635 }
1636out:
1637 *retlen = offset - start;
1638 return rc;
1639}
1640
1641int efx_mcdi_mtd_sync(struct mtd_info *mtd)
1642{
1643 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1644 struct efx_nic *efx = mtd->priv;
1645 int rc = 0;
1646
1647 if (part->updating) {
1648 part->updating = false;
1649 rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
1650 }
1651
1652 return rc;
1653}
1654
/* Build the user-visible MTD partition name.
 *
 * Formats "<device name> <partition type name>:<fw subtype>" into
 * part->name, truncating to the buffer size.
 */
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}
1664
1665#endif /* CONFIG_SFC_MTD */