blob: f2c90997fabdcdc9e2e82c6d8642fb45760d6d50 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020
/* Must be a power of 2 or else MODULO will BUG_ON */
/* NOTE(review): presumably the interval (in worker ticks) between fw
 * temperature queries — confirm against the be_worker user of this. */
static int be_get_temp_freq = 32;
23
Sathya Perla8788fdc2009-07-27 22:52:03 +000024static void be_mcc_notify(struct be_adapter *adapter)
Sathya Perla5fb379e2009-06-18 00:02:59 +000025{
Sathya Perla8788fdc2009-07-27 22:52:03 +000026 struct be_queue_info *mccq = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +000027 u32 val = 0;
28
Ajit Khaparde7acc2082011-02-11 13:38:17 +000029 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
Sathya Perla5fb379e2009-06-18 00:02:59 +000035 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +000037
38 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +000039 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
Sathya Perla5fb379e2009-06-18 00:02:59 +000040}
41
42/* To check if valid bit is set, check the entire word as we don't know
43 * the endianness of the data (old entry is host endian while a new entry is
44 * little endian) */
Sathya Perlaefd2e402009-07-27 22:53:10 +000045static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
Sathya Perla5fb379e2009-06-18 00:02:59 +000046{
47 if (compl->flags != 0) {
48 compl->flags = le32_to_cpu(compl->flags);
49 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50 return true;
51 } else {
52 return false;
53 }
54}
55
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* Marks the entry consumed so be_mcc_compl_is_new() won't see it again */
	compl->flags = 0;
}
61
/* Process one MCC command completion: extract the status, signal waiters
 * of special cmds (flashrom write, async stats), and warn on errors.
 * Returns the completion status (MCC_STATUS_SUCCESS == 0 on success).
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* Flashrom writes complete asynchronously; wake the waiter */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		/* Stats cmds are fire-and-forget; fold the response into
		 * netdev stats here instead of in a synchronous waiter.
		 */
		if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_cmd_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
100
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000101/* Link state evt is a string of bytes; no need for endian swapping */
Sathya Perla8788fdc2009-07-27 22:52:03 +0000102static void be_async_link_state_process(struct be_adapter *adapter,
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000103 struct be_async_event_link_state *evt)
104{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000105 be_link_status_update(adapter,
106 evt->port_link_status == ASYNC_EVENT_LINK_UP);
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000107}
108
Somnath Koturcc4ce022010-10-21 07:11:14 -0700109/* Grp5 CoS Priority evt */
110static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
111 struct be_async_event_grp5_cos_priority *evt)
112{
113 if (evt->valid) {
114 adapter->vlan_prio_bmap = evt->available_priority_bmap;
Ajit Khaparde60964dd2011-02-11 13:37:25 +0000115 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700116 adapter->recommended_prio =
117 evt->reco_default_priority << VLAN_PRIO_SHIFT;
118 }
119}
120
121/* Grp5 QOS Speed evt */
122static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
123 struct be_async_event_grp5_qos_link_speed *evt)
124{
125 if (evt->physical_port == adapter->port_num) {
126 /* qos_link_speed is in units of 10 Mbps */
127 adapter->link_speed = evt->qos_link_speed * 10;
128 }
129}
130
Ajit Khaparde3968fa12011-02-20 11:41:53 +0000131/*Grp5 PVID evt*/
132static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
133 struct be_async_event_grp5_pvid_state *evt)
134{
135 if (evt->enabled)
Somnath Kotur6709d952011-05-04 22:40:46 +0000136 adapter->pvid = le16_to_cpu(evt->tag);
Ajit Khaparde3968fa12011-02-20 11:41:53 +0000137 else
138 adapter->pvid = 0;
139}
140
Somnath Koturcc4ce022010-10-21 07:11:14 -0700141static void be_async_grp5_evt_process(struct be_adapter *adapter,
142 u32 trailer, struct be_mcc_compl *evt)
143{
144 u8 event_type = 0;
145
146 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
147 ASYNC_TRAILER_EVENT_TYPE_MASK;
148
149 switch (event_type) {
150 case ASYNC_EVENT_COS_PRIORITY:
151 be_async_grp5_cos_priority_process(adapter,
152 (struct be_async_event_grp5_cos_priority *)evt);
153 break;
154 case ASYNC_EVENT_QOS_SPEED:
155 be_async_grp5_qos_speed_process(adapter,
156 (struct be_async_event_grp5_qos_link_speed *)evt);
157 break;
Ajit Khaparde3968fa12011-02-20 11:41:53 +0000158 case ASYNC_EVENT_PVID_STATE:
159 be_async_grp5_pvid_state_process(adapter,
160 (struct be_async_event_grp5_pvid_state *)evt);
161 break;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700162 default:
163 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
164 break;
165 }
166}
167
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000168static inline bool is_link_state_evt(u32 trailer)
169{
Eric Dumazet807540b2010-09-23 05:40:09 +0000170 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000171 ASYNC_TRAILER_EVENT_CODE_MASK) ==
Eric Dumazet807540b2010-09-23 05:40:09 +0000172 ASYNC_EVENT_CODE_LINK_STATE;
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000173}
Sathya Perla5fb379e2009-06-18 00:02:59 +0000174
Somnath Koturcc4ce022010-10-21 07:11:14 -0700175static inline bool is_grp5_evt(u32 trailer)
176{
177 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
178 ASYNC_TRAILER_EVENT_CODE_MASK) ==
179 ASYNC_EVENT_CODE_GRP_5);
180}
181
/* Return the next new (valid) completion on the MCC CQ and advance the
 * tail; NULL when none is pending.  Callers hold mcc_cq_lock.
 */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
193
/* Re-arm the MCC CQ and allow the completion path to keep re-arming it */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	/* Take cq_lock so this doesn't race with be_process_mcc() */
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
203
/* Stop the completion path from re-arming the MCC CQ */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
208
/* Drain the MCC CQ: dispatch async events to their handlers and process
 * command completions.  Returns the number of entries consumed; the status
 * of the last processed cmd completion is returned through *status.
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				/* One wrb slot is freed per cmd completion */
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
236
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	/* Don't poll hardware that has reported an EEH error */
	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		/* All posted wrbs have completed */
		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	/* Status of the last processed cmd completion */
	return status;
}
263
/* Notify MCC requests and wait for completion */
/* Returns the status of the last completed cmd (0 on success) */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
270
/* Poll the mailbox doorbell until the hw sets the ready bit (~4s timeout).
 * An all-ones read means the PCI slot has been disconnected.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card.Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			/* Dump the fw error registers (not valid on Lancer) */
			if (!lancer_chip(adapter))
				be_detect_dump_ue(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}
307
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 *
 * The 64-bit mailbox DMA address is delivered in two doorbell writes
 * (hi half first, then lo half), each of which must wait for the ready
 * bit before proceeding; the write sequence must not be reordered.
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
357
Sathya Perla8788fdc2009-07-27 22:52:03 +0000358static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700359{
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000360 u32 sem;
361
362 if (lancer_chip(adapter))
363 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
364 else
365 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700366
367 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
368 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
369 return -1;
370 else
371 return 0;
372}
373
/* Wait (polling every 2s, up to ~40s) for the fw's power-on self test
 * to reach the ARMFW_RDY stage.  Returns 0 on success, -EINTR if the
 * sleep was interrupted, -1 on POST error or timeout.
 */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
399
/* Payload area of a wrb whose cmd is embedded in the wrb itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
404
/* First scatter-gather entry of a wrb with a non-embedded payload */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
409
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* tag0 carries the opcode so completions can be matched in
	 * be_mcc_compl_process() */
	wrb->tag0 = opcode;
	/* Convert the first 8 dwords (the hdr) to little endian for the hw */
	be_dws_cpu_to_le(wrb, 8);
}
423
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	/* request_length excludes the header itself */
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}
433
434static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
435 struct be_dma_mem *mem)
436{
437 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
438 u64 dma = (u64)mem->dma;
439
440 for (i = 0; i < buf_pages; i++) {
441 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
442 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
443 dma += PAGE_SIZE_4K;
444 }
445}
446
447/* Converts interrupt delay in microseconds to multiplier value */
448static u32 eq_delay_to_mult(u32 usec_delay)
449{
450#define MAX_INTR_RATE 651042
451 const u32 round = 10;
452 u32 multiplier;
453
454 if (usec_delay == 0)
455 multiplier = 0;
456 else {
457 u32 interrupt_rate = 1000000 / usec_delay;
458 /* Max delay, corresponding to the lowest interrupt rate */
459 if (interrupt_rate == 0)
460 multiplier = 1023;
461 else {
462 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
463 multiplier /= interrupt_rate;
464 /* Round the multiplier to the closest value.*/
465 multiplier = (multiplier + round/2) / round;
466 multiplier = min(multiplier, (u32)1023);
467 }
468 }
469 return multiplier;
470}
471
Sathya Perlab31c50a2009-09-17 10:30:13 -0700472static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700473{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700474 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
475 struct be_mcc_wrb *wrb
476 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
477 memset(wrb, 0, sizeof(*wrb));
478 return wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700479}
480
/* Allocate the next free wrb on the MCC queue (zeroed); returns NULL when
 * the queue is full.  Callers hold mcc_lock.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
497
Sathya Perla2243e2e2009-11-22 22:02:03 +0000498/* Tell fw we're about to start firing cmds by writing a
499 * special pattern across the wrb hdr; uses mbox
500 */
501int be_cmd_fw_init(struct be_adapter *adapter)
502{
503 u8 *wrb;
504 int status;
505
Ivan Vecera29849612010-12-14 05:43:19 +0000506 if (mutex_lock_interruptible(&adapter->mbox_lock))
507 return -1;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000508
509 wrb = (u8 *)wrb_from_mbox(adapter);
Sathya Perla359a9722010-12-01 01:03:36 +0000510 *wrb++ = 0xFF;
511 *wrb++ = 0x12;
512 *wrb++ = 0x34;
513 *wrb++ = 0xFF;
514 *wrb++ = 0xFF;
515 *wrb++ = 0x56;
516 *wrb++ = 0x78;
517 *wrb = 0xFF;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000518
519 status = be_mbox_notify_wait(adapter);
520
Ivan Vecera29849612010-12-14 05:43:19 +0000521 mutex_unlock(&adapter->mbox_lock);
Sathya Perla2243e2e2009-11-22 22:02:03 +0000522 return status;
523}
524
525/* Tell fw we're done with firing cmds by writing a
526 * special pattern across the wrb hdr; uses mbox
527 */
528int be_cmd_fw_clean(struct be_adapter *adapter)
529{
530 u8 *wrb;
531 int status;
532
Sathya Perlacf588472010-02-14 21:22:01 +0000533 if (adapter->eeh_err)
534 return -EIO;
535
Ivan Vecera29849612010-12-14 05:43:19 +0000536 if (mutex_lock_interruptible(&adapter->mbox_lock))
537 return -1;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000538
539 wrb = (u8 *)wrb_from_mbox(adapter);
540 *wrb++ = 0xFF;
541 *wrb++ = 0xAA;
542 *wrb++ = 0xBB;
543 *wrb++ = 0xFF;
544 *wrb++ = 0xFF;
545 *wrb++ = 0xCC;
546 *wrb++ = 0xDD;
547 *wrb = 0xFF;
548
549 status = be_mbox_notify_wait(adapter);
550
Ivan Vecera29849612010-12-14 05:43:19 +0000551 mutex_unlock(&adapter->mbox_lock);
Sathya Perla2243e2e2009-11-22 22:02:03 +0000552 return status;
553}
/* Create an event queue for 'eq' with the given interrupt delay; uses mbox.
 * On success fills in eq->id and marks the queue created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
596
Sathya Perlab31c50a2009-09-17 10:30:13 -0700597/* Uses mbox */
Sathya Perla8788fdc2009-07-27 22:52:03 +0000598int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700599 u8 type, bool permanent, u32 if_handle)
600{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700601 struct be_mcc_wrb *wrb;
602 struct be_cmd_req_mac_query *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700603 int status;
604
Ivan Vecera29849612010-12-14 05:43:19 +0000605 if (mutex_lock_interruptible(&adapter->mbox_lock))
606 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700607
608 wrb = wrb_from_mbox(adapter);
609 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700610
Ajit Khaparded744b442009-12-03 06:12:06 +0000611 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
612 OPCODE_COMMON_NTWK_MAC_QUERY);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613
614 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
615 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
616
617 req->type = type;
618 if (permanent) {
619 req->permanent = 1;
620 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700621 req->if_id = cpu_to_le16((u16) if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 req->permanent = 0;
623 }
624
Sathya Perlab31c50a2009-09-17 10:30:13 -0700625 status = be_mbox_notify_wait(adapter);
626 if (!status) {
627 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700628 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
Sathya Perlab31c50a2009-09-17 10:30:13 -0700629 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700630
Ivan Vecera29849612010-12-14 05:43:19 +0000631 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632 return status;
633}
634
Sathya Perlab31c50a2009-09-17 10:30:13 -0700635/* Uses synchronous MCCQ */
Sathya Perla8788fdc2009-07-27 22:52:03 +0000636int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000637 u32 if_id, u32 *pmac_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700638{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700639 struct be_mcc_wrb *wrb;
640 struct be_cmd_req_pmac_add *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 int status;
642
Sathya Perlab31c50a2009-09-17 10:30:13 -0700643 spin_lock_bh(&adapter->mcc_lock);
644
645 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +0000646 if (!wrb) {
647 status = -EBUSY;
648 goto err;
649 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700650 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651
Ajit Khaparded744b442009-12-03 06:12:06 +0000652 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
653 OPCODE_COMMON_NTWK_PMAC_ADD);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700654
655 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
656 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
657
Ajit Khapardef8617e02011-02-11 13:36:37 +0000658 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 req->if_id = cpu_to_le32(if_id);
660 memcpy(req->mac_address, mac_addr, ETH_ALEN);
661
Sathya Perlab31c50a2009-09-17 10:30:13 -0700662 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663 if (!status) {
664 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
665 *pmac_id = le32_to_cpu(resp->pmac_id);
666 }
667
Sathya Perla713d03942009-11-22 22:02:45 +0000668err:
Sathya Perlab31c50a2009-09-17 10:30:13 -0700669 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 return status;
671}
672
Sathya Perlab31c50a2009-09-17 10:30:13 -0700673/* Uses synchronous MCCQ */
Ajit Khapardef8617e02011-02-11 13:36:37 +0000674int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700676 struct be_mcc_wrb *wrb;
677 struct be_cmd_req_pmac_del *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678 int status;
679
Sathya Perlab31c50a2009-09-17 10:30:13 -0700680 spin_lock_bh(&adapter->mcc_lock);
681
682 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +0000683 if (!wrb) {
684 status = -EBUSY;
685 goto err;
686 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700687 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688
Ajit Khaparded744b442009-12-03 06:12:06 +0000689 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
690 OPCODE_COMMON_NTWK_PMAC_DEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691
692 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
693 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
694
Ajit Khapardef8617e02011-02-11 13:36:37 +0000695 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700696 req->if_id = cpu_to_le32(if_id);
697 req->pmac_id = cpu_to_le32(pmac_id);
698
Sathya Perlab31c50a2009-09-17 10:30:13 -0700699 status = be_mcc_notify_wait(adapter);
700
Sathya Perla713d03942009-11-22 22:02:45 +0000701err:
Sathya Perlab31c50a2009-09-17 10:30:13 -0700702 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703 return status;
704}
705
/* Create a completion queue (CQ) bound to the given event queue.
 * Uses the bootstrap Mbox (not the MCC queue), so it may sleep.
 *
 * @adapter:     board private structure
 * @cq:          CQ to create; on success cq->id and cq->created are filled in
 * @eq:          EQ that this CQ will post events to
 * @sol_evts:    request solicited events (BE chips only)
 * @no_delay:    disable interrupt coalescing delay for this CQ
 * @coalesce_wm: coalesce watermark (BE chips only)
 *
 * Returns 0 on success, -1 if interrupted while waiting for the mbox lock,
 * or a nonzero firmware completion status.
 */
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_CQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        /* Lancer uses a v2 request and a different context layout than BE */
        if (lancer_chip(adapter)) {
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                                                no_delay);
                AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                                                                ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                                                                ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
        } else {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                                                                coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                                                                ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, solevent,
                                                                ctxt, sol_evts);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
        }

        /* Context is byte-swapped as a whole before issuing the command */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
774
775static u32 be_encoded_q_len(int q_len)
776{
777 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
778 if (len_encoded == 16)
779 len_encoded = 0;
780 return len_encoded;
781}
782
/* Create the MCC (management command) queue, completing onto @cq.
 * Uses the bootstrap Mbox since the MCC queue does not exist yet.
 *
 * @adapter: board private structure
 * @mccq:    MCC queue to create; id/created filled in on success
 * @cq:      completion queue the MCC queue will use
 *
 * Returns 0 on success, -1 if interrupted while waiting for the mbox lock,
 * or a nonzero firmware completion status.
 */
int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE_EXT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        /* Lancer uses a v1 request and its own context layout */
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                                                                ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                                                                ctxt, 1);

        } else {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        }

        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}
842
/* Create an ethernet TX queue completing onto @cq.
 * Uses the bootstrap Mbox, so it may sleep.
 *
 * @adapter: board private structure
 * @txq:     TX queue to create; id/created filled in on success
 * @cq:      send completion queue for this TX ring
 *
 * Returns 0 on success, -1 if interrupted while waiting for the mbox lock,
 * or a nonzero firmware completion status.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_TX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        /* Lancer needs a v1 request carrying the interface id in the ctxt */
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                                        adapter->if_handle);
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
896
Sathya Perlab31c50a2009-09-17 10:30:13 -0700897/* Uses mbox */
Sathya Perla8788fdc2009-07-27 22:52:03 +0000898int be_cmd_rxq_create(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700899 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
Sathya Perla3abcded2010-10-03 22:12:27 -0700900 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700902 struct be_mcc_wrb *wrb;
903 struct be_cmd_req_eth_rx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700904 struct be_dma_mem *q_mem = &rxq->dma_mem;
905 int status;
906
Ivan Vecera29849612010-12-14 05:43:19 +0000907 if (mutex_lock_interruptible(&adapter->mbox_lock))
908 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700909
910 wrb = wrb_from_mbox(adapter);
911 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700912
Ajit Khaparded744b442009-12-03 06:12:06 +0000913 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
914 OPCODE_ETH_RX_CREATE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700915
916 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
917 sizeof(*req));
918
919 req->cq_id = cpu_to_le16(cq_id);
920 req->frag_size = fls(frag_size) - 1;
921 req->num_pages = 2;
922 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
923 req->interface_id = cpu_to_le32(if_id);
924 req->max_frame_size = cpu_to_le16(max_frame_size);
925 req->rss_queue = cpu_to_le32(rss);
926
Sathya Perlab31c50a2009-09-17 10:30:13 -0700927 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700928 if (!status) {
929 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
930 rxq->id = le16_to_cpu(resp->id);
931 rxq->created = true;
Sathya Perla3abcded2010-10-03 22:12:27 -0700932 *rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700934
Ivan Vecera29849612010-12-14 05:43:19 +0000935 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700936
937 return status;
938}
939
/* Generic destroyer function for all types of queues
 * Uses Mbox
 *
 * @adapter:    board private structure
 * @q:          queue to destroy (its ->id is sent to the firmware)
 * @queue_type: one of QTYPE_EQ/CQ/TXQ/RXQ/MCCQ; any other value is a
 *              driver bug and triggers BUG()
 *
 * Returns 0 on success, -EIO after an EEH error, -1 if interrupted while
 * waiting for the mbox lock, or a nonzero firmware completion status.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        /* Don't touch the hardware if the card is in an error state */
        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        /* Map queue type to the matching subsystem/opcode pair */
        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
996
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 *
 * @adapter:      board private structure
 * @cap_flags:    interface capability flags requested
 * @en_flags:     capability flags to enable
 * @mac:          MAC address to program (used only if !pmac_invalid)
 * @pmac_invalid: true if no MAC should be programmed at create time
 * @if_handle:    out: firmware interface handle
 * @pmac_id:      out: id of the programmed MAC (only if !pmac_invalid)
 * @domain:       virtual domain for this interface
 *
 * Returns 0 on success, -1 if interrupted while waiting for the mbox lock,
 * or a nonzero firmware completion status.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
                u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                /* pmac_id is only valid when a MAC was actually programmed */
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1038
Sathya Perlab31c50a2009-09-17 10:30:13 -07001039/* Uses mbox */
Ajit Khaparde658681f2011-02-11 13:34:46 +00001040int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001041{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001042 struct be_mcc_wrb *wrb;
1043 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044 int status;
1045
Sathya Perlacf588472010-02-14 21:22:01 +00001046 if (adapter->eeh_err)
1047 return -EIO;
1048
Ivan Vecera29849612010-12-14 05:43:19 +00001049 if (mutex_lock_interruptible(&adapter->mbox_lock))
1050 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001051
1052 wrb = wrb_from_mbox(adapter);
1053 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054
Ajit Khaparded744b442009-12-03 06:12:06 +00001055 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1056 OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057
1058 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1059 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1060
Ajit Khaparde658681f2011-02-11 13:34:46 +00001061 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001063
1064 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001065
Ivan Vecera29849612010-12-14 05:43:19 +00001066 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067
1068 return status;
1069}
1070
1071/* Get stats is a non embedded command: the request is not embedded inside
1072 * WRB but is a separate dma memory block
Sathya Perlab31c50a2009-09-17 10:30:13 -07001073 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001074 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001075int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001077 struct be_mcc_wrb *wrb;
1078 struct be_cmd_req_get_stats *req;
1079 struct be_sge *sge;
Sathya Perla713d03942009-11-22 22:02:45 +00001080 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001082 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1083 be_cmd_get_die_temperature(adapter);
1084
Sathya Perlab31c50a2009-09-17 10:30:13 -07001085 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086
Sathya Perlab31c50a2009-09-17 10:30:13 -07001087 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001088 if (!wrb) {
1089 status = -EBUSY;
1090 goto err;
1091 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001092 req = nonemb_cmd->va;
1093 sge = nonembedded_sgl(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094
Ajit Khaparded744b442009-12-03 06:12:06 +00001095 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1096 OPCODE_ETH_GET_STATISTICS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001097
1098 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1099 OPCODE_ETH_GET_STATISTICS, sizeof(*req));
Ajit Khaparde63499352011-04-19 12:11:02 +00001100 wrb->tag1 = CMD_SUBSYSTEM_ETH;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001101 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1102 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1103 sge->len = cpu_to_le32(nonemb_cmd->size);
1104
Sathya Perlab31c50a2009-09-17 10:30:13 -07001105 be_mcc_notify(adapter);
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001106 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001107
Sathya Perla713d03942009-11-22 22:02:45 +00001108err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001109 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001110 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111}
1112
/* Uses synchronous mcc
 *
 * @adapter:    board private structure
 * @link_up:    out: true iff the firmware reports a nonzero mac speed
 * @mac_speed:  out: raw mac speed code (valid only when *link_up)
 * @link_speed: out: negotiated link speed (valid only when *link_up)
 * @dom:        unused in this version (kept for API compatibility)
 *
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or a nonzero
 * firmware completion status.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        /* Default to link-down; only set true on a positive report */
        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_up = true;
                        *link_speed = le16_to_cpu(resp->link_speed);
                        *mac_speed = resp->mac_speed;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1152
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001153/* Uses synchronous mcc */
1154int be_cmd_get_die_temperature(struct be_adapter *adapter)
1155{
1156 struct be_mcc_wrb *wrb;
1157 struct be_cmd_req_get_cntl_addnl_attribs *req;
1158 int status;
1159
1160 spin_lock_bh(&adapter->mcc_lock);
1161
1162 wrb = wrb_from_mccq(adapter);
1163 if (!wrb) {
1164 status = -EBUSY;
1165 goto err;
1166 }
1167 req = embedded_payload(wrb);
1168
1169 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1170 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1171
1172 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1173 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1174
1175 status = be_mcc_notify_wait(adapter);
1176 if (!status) {
1177 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1178 embedded_payload(wrb);
1179 adapter->drv_stats.be_on_die_temperature =
1180 resp->on_die_temperature;
1181 }
1182 /* If IOCTL fails once, do not bother issuing it again */
1183 else
1184 be_get_temp_freq = 0;
1185
1186err:
1187 spin_unlock_bh(&adapter->mcc_lock);
1188 return status;
1189}
1190
/* Uses synchronous mcc
 *
 * Query the size of the firmware FAT (failure analysis/trace) log.
 *
 * @adapter:  board private structure
 * @log_size: out: log size in bytes, minus the leading size word;
 *            left untouched if NULL or if the firmware reports 0
 *
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or a nonzero
 * firmware completion status.
 */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fat *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MANAGE_FAT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
        req->fat_operation = cpu_to_le32(QUERY_FAT);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
                if (log_size && resp->log_size)
                        /* exclude the size word the firmware prepends */
                        *log_size = le32_to_cpu(resp->log_size) -
                                        sizeof(u32);
        }
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1224
1225void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1226{
1227 struct be_dma_mem get_fat_cmd;
1228 struct be_mcc_wrb *wrb;
1229 struct be_cmd_req_get_fat *req;
1230 struct be_sge *sge;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001231 u32 offset = 0, total_size, buf_size,
1232 log_offset = sizeof(u32), payload_len;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001233 int status;
1234
1235 if (buf_len == 0)
1236 return;
1237
1238 total_size = buf_len;
1239
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001240 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1241 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1242 get_fat_cmd.size,
1243 &get_fat_cmd.dma);
1244 if (!get_fat_cmd.va) {
1245 status = -ENOMEM;
1246 dev_err(&adapter->pdev->dev,
1247 "Memory allocation failure while retrieving FAT data\n");
1248 return;
1249 }
1250
Somnath Kotur311fddc2011-03-16 21:22:43 +00001251 spin_lock_bh(&adapter->mcc_lock);
1252
Somnath Kotur311fddc2011-03-16 21:22:43 +00001253 while (total_size) {
1254 buf_size = min(total_size, (u32)60*1024);
1255 total_size -= buf_size;
1256
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001257 wrb = wrb_from_mccq(adapter);
1258 if (!wrb) {
1259 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001260 goto err;
1261 }
1262 req = get_fat_cmd.va;
1263 sge = nonembedded_sgl(wrb);
1264
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001265 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1266 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
Somnath Kotur311fddc2011-03-16 21:22:43 +00001267 OPCODE_COMMON_MANAGE_FAT);
1268
1269 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001270 OPCODE_COMMON_MANAGE_FAT, payload_len);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001271
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001272 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
Somnath Kotur311fddc2011-03-16 21:22:43 +00001273 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1274 sge->len = cpu_to_le32(get_fat_cmd.size);
1275
1276 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1277 req->read_log_offset = cpu_to_le32(log_offset);
1278 req->read_log_length = cpu_to_le32(buf_size);
1279 req->data_buffer_size = cpu_to_le32(buf_size);
1280
1281 status = be_mcc_notify_wait(adapter);
1282 if (!status) {
1283 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1284 memcpy(buf + offset,
1285 resp->data_buffer,
1286 resp->read_log_length);
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001287 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001288 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001289 goto err;
1290 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001291 offset += buf_size;
1292 log_offset += buf_size;
1293 }
1294err:
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001295 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1296 get_fat_cmd.va,
1297 get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001298 spin_unlock_bh(&adapter->mcc_lock);
1299}
1300
Sathya Perlab31c50a2009-09-17 10:30:13 -07001301/* Uses Mbox */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001302int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001303{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001304 struct be_mcc_wrb *wrb;
1305 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001306 int status;
1307
Ivan Vecera29849612010-12-14 05:43:19 +00001308 if (mutex_lock_interruptible(&adapter->mbox_lock))
1309 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001310
1311 wrb = wrb_from_mbox(adapter);
1312 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313
Ajit Khaparded744b442009-12-03 06:12:06 +00001314 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1315 OPCODE_COMMON_GET_FW_VERSION);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001316
1317 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1318 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1319
Sathya Perlab31c50a2009-09-17 10:30:13 -07001320 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321 if (!status) {
1322 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1323 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1324 }
1325
Ivan Vecera29849612010-12-14 05:43:19 +00001326 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327 return status;
1328}
1329
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 *
 * @adapter: board private structure
 * @eq_id:   id of the event queue to retune
 * @eqd:     new delay multiplier for interrupt coalescing
 *
 * Fire-and-forget: the command is posted to the MCC queue and this
 * function does not wait for its completion.
 * Returns 0 on success or -EBUSY if no MCC WRB is available.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MODIFY_EQ_DELAY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        /* The request can carry several EQs; we only ever update one */
        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1365
/* Uses synchronous mcc
 *
 * Program the VLAN filter table for interface @if_id.
 *
 * @adapter:     board private structure
 * @if_id:       firmware interface handle
 * @vtag_array:  array of @num VLAN tags (ignored when @promiscuous)
 * @num:         number of entries in @vtag_array
 * @untagged:    also accept untagged frames
 * @promiscuous: accept all VLANs (vtag_array is not sent)
 *
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or a nonzero
 * firmware completion status.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        /* The tag list is only meaningful when not VLAN-promiscuous */
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1404
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 *
 * Enable or disable promiscuous mode on adapter->if_handle via the
 * RX_FILTER command (non-embedded: the request lives in its own DMA block).
 *
 * @adapter: board private structure
 * @en:      true to enable promiscuous reception, false to disable
 *
 * Returns 0 on success, -ENOMEM if the DMA block cannot be allocated,
 * -EBUSY if no MCC WRB is available, or a nonzero firmware status.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rx_filter *req;
        struct be_dma_mem promiscous_cmd;
        struct be_sge *sge;
        int status;

        /* Allocate the non-embedded request before taking mcc_lock:
         * pci_alloc_consistent must not be called with BHs disabled here.
         */
        memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
        promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
        promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
                                promiscous_cmd.size, &promiscous_cmd.dma);
        if (!promiscous_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = promiscous_cmd.va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_NTWK_RX_FILTER);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

        /* Mask selects the promiscuous bit; if_flags sets/clears it */
        req->if_id = cpu_to_le32(adapter->if_handle);
        req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
        if (en)
                req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);

        sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
        sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(promiscous_cmd.size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, promiscous_cmd.size,
                        promiscous_cmd.va, promiscous_cmd.dma);
        return status;
}
1459
/*
 * Programs the multicast MAC address filter for interface @if_id, or
 * enables multicast-promiscuous mode when @netdev is NULL.
 * Uses MCC for this command as it may be called in BH context.
 * @mem: caller-provided DMA buffer used as the non-embedded request;
 *       must be large enough for be_cmd_req_mcast_mac_config.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	/* Request buffer is reused across calls; clear stale contents */
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		/* Copy the device's current multicast list into the request */
		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		/* No list supplied => accept all multicast traffic */
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1511
Sathya Perlab31c50a2009-09-17 10:30:13 -07001512/* Uses synchrounous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001513int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001515 struct be_mcc_wrb *wrb;
1516 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001517 int status;
1518
Sathya Perlab31c50a2009-09-17 10:30:13 -07001519 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520
Sathya Perlab31c50a2009-09-17 10:30:13 -07001521 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001522 if (!wrb) {
1523 status = -EBUSY;
1524 goto err;
1525 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001526 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
Ajit Khaparded744b442009-12-03 06:12:06 +00001528 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1529 OPCODE_COMMON_SET_FLOW_CONTROL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530
1531 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1532 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1533
1534 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1535 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1536
Sathya Perlab31c50a2009-09-17 10:30:13 -07001537 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538
Sathya Perla713d03942009-11-22 22:02:45 +00001539err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001540 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 return status;
1542}
1543
/* Reads the adapter's current tx/rx pause-frame (flow control) settings.
 * Uses synchronous MCC. On success *tx_fc and *rx_fc are filled in;
 * they are left untouched on failure.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Embedded response overlays the request in the same WRB */
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1578
/* Queries firmware configuration: physical port number, function mode
 * and function capability flags.
 * Uses the bootstrap mailbox (process context only; may sleep).
 * Output parameters are written only on success.
 * Returns 0 on success, -1 if the mbox lock wait was interrupted, else
 * the mailbox completion status.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
sarveshwarb14074ea2009-08-05 13:05:24 -07001610
Sathya Perlab31c50a2009-09-17 10:30:13 -07001611/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07001612int be_cmd_reset_function(struct be_adapter *adapter)
1613{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001614 struct be_mcc_wrb *wrb;
1615 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07001616 int status;
1617
Ivan Vecera29849612010-12-14 05:43:19 +00001618 if (mutex_lock_interruptible(&adapter->mbox_lock))
1619 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07001620
Sathya Perlab31c50a2009-09-17 10:30:13 -07001621 wrb = wrb_from_mbox(adapter);
1622 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07001623
Ajit Khaparded744b442009-12-03 06:12:06 +00001624 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1625 OPCODE_COMMON_FUNCTION_RESET);
sarveshwarb14074ea2009-08-05 13:05:24 -07001626
1627 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1628 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1629
Sathya Perlab31c50a2009-09-17 10:30:13 -07001630 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07001631
Ivan Vecera29849612010-12-14 05:43:19 +00001632 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07001633 return status;
1634}
Ajit Khaparde84517482009-09-04 03:12:16 +00001635
Sathya Perla3abcded2010-10-03 22:12:27 -07001636int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1637{
1638 struct be_mcc_wrb *wrb;
1639 struct be_cmd_req_rss_config *req;
1640 u32 myhash[10];
1641 int status;
1642
Ivan Vecera29849612010-12-14 05:43:19 +00001643 if (mutex_lock_interruptible(&adapter->mbox_lock))
1644 return -1;
Sathya Perla3abcded2010-10-03 22:12:27 -07001645
1646 wrb = wrb_from_mbox(adapter);
1647 req = embedded_payload(wrb);
1648
1649 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1650 OPCODE_ETH_RSS_CONFIG);
1651
1652 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1653 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1654
1655 req->if_id = cpu_to_le32(adapter->if_handle);
1656 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1657 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1658 memcpy(req->cpu_table, rsstable, table_size);
1659 memcpy(req->hash, myhash, sizeof(myhash));
1660 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1661
1662 status = be_mbox_notify_wait(adapter);
1663
Ivan Vecera29849612010-12-14 05:43:19 +00001664 mutex_unlock(&adapter->mbox_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07001665 return status;
1666}
1667
/* Controls the port-identification beacon LED.
 * Uses synchronous MCC.
 * @bcn:   beacon duration value passed to firmware
 * @sts:   status duration value passed to firmware
 * @state: new beacon state
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1702
/* Reads the beacon LED state of @port_num into *state.
 * Uses synchronous MCC. *state is written only on success.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Embedded response overlays the request in the same WRB */
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1738
/* Writes a flash ROM region using the request already laid out in the
 * DMA buffer @cmd (non-embedded command).
 * Unlike other MCC commands, completion is signalled asynchronously via
 * adapter->flash_compl (completed by the MCC event handler), with a 12s
 * timeout, since flash operations can far exceed normal command latency.
 * Note the mcc_lock is deliberately dropped before sleeping on the
 * completion. Returns -EBUSY if no WRB, -1 on timeout, else the status
 * recorded by the completion handler in adapter->flash_status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* NOTE(review): tag1 appears to let the completion handler identify
	 * this command and complete flash_compl - confirm against handler.
	 */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* Fire the command, then release the lock before sleeping */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001787
/* Reads 4 bytes of the redboot flash image at @offset into @flashed_crc
 * (used to compare a stored CRC against a new image).
 * Uses synchronous MCC with an embedded request; the payload is
 * sizeof(*req) + 4 to make room for the 4-byte data buffer in the
 * response overlay.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001823
/* Programs the MAC address that a magic Wake-on-LAN packet must carry
 * to wake the system.
 * Uses synchronous MCC with a caller-provided non-embedded DMA buffer
 * @nonemb_cmd (request is built in nonemb_cmd->va).
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Suresh Rff33a6e2009-12-03 16:15:52 -08001859
/* Puts the given port into (or takes it out of) the given loopback mode,
 * in preparation for a loopback test.
 * Uses synchronous MCC.
 * @loopback_type: firmware loopback mode selector
 * @enable:        new loopback state passed through to firmware
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	/* Loop the port back onto itself */
	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1894
/* Runs a firmware loopback self-test on @port_num: firmware transmits
 * @num_pkts packets of @pkt_size bytes filled with @pattern and verifies
 * they are received back.
 * Uses synchronous MCC. Returns a negative errno on command failure,
 * else the test status reported by firmware (0 = pass).
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* Extended command timeout; presumably in seconds - confirm
	 * against the firmware spec.
	 */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	/* Port loops back to itself */
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1936
/* Host<->adapter DDR DMA self-test: fills the request's send buffer
 * with @pattern replicated byte-by-byte (little-endian byte order),
 * asks firmware to DMA it out and back, then compares the received
 * buffer against the sent one.
 * @cmd: caller-provided DMA buffer sized for the request plus the
 *       send/receive buffers of @byte_cnt bytes each.
 * Uses synchronous MCC. Returns 0 on success, -1 on data mismatch or
 * send error, -EBUSY if no WRB is available.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Replicate the 8-byte pattern across the whole send buffer,
	 * one byte at a time (j cycles 0..7 selecting the pattern byte).
	 */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		/* Response overlays the same DMA buffer as the request */
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08001988
/* Reads the adapter's SEEPROM contents into the caller-provided
 * non-embedded DMA buffer @nonemb_cmd; the caller parses the response
 * from nonemb_cmd->va after a successful return.
 * Uses synchronous MCC.
 */
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002023
/* Retrieves PHY details into the caller-provided DMA buffer @cmd;
 * the caller parses the response from cmd->va after a successful return.
 * Uses synchronous MCC.
 */
int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
				OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002058
/* Sets the NIC maximum bandwidth (rate limit) for the given domain.
 * Uses synchronous MCC.
 * @bps: maximum NIC bandwidth value passed to firmware; units are
 *       defined by the firmware interface - confirm against callers.
 */
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	/* Only the NIC rate-limit field of the QOS command is valid */
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002091
2092int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2093{
2094 struct be_mcc_wrb *wrb;
2095 struct be_cmd_req_cntl_attribs *req;
2096 struct be_cmd_resp_cntl_attribs *resp;
2097 struct be_sge *sge;
2098 int status;
2099 int payload_len = max(sizeof(*req), sizeof(*resp));
2100 struct mgmt_controller_attrib *attribs;
2101 struct be_dma_mem attribs_cmd;
2102
2103 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2104 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2105 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2106 &attribs_cmd.dma);
2107 if (!attribs_cmd.va) {
2108 dev_err(&adapter->pdev->dev,
2109 "Memory allocation failure\n");
2110 return -ENOMEM;
2111 }
2112
2113 if (mutex_lock_interruptible(&adapter->mbox_lock))
2114 return -1;
2115
2116 wrb = wrb_from_mbox(adapter);
2117 if (!wrb) {
2118 status = -EBUSY;
2119 goto err;
2120 }
2121 req = attribs_cmd.va;
2122 sge = nonembedded_sgl(wrb);
2123
2124 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2125 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2126 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2127 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2128 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2129 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2130 sge->len = cpu_to_le32(attribs_cmd.size);
2131
2132 status = be_mbox_notify_wait(adapter);
2133 if (!status) {
2134 attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
2135 sizeof(struct be_cmd_resp_hdr));
2136 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2137 }
2138
2139err:
2140 mutex_unlock(&adapter->mbox_lock);
2141 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2142 attribs_cmd.dma);
2143 return status;
2144}
Sathya Perla2e588f82011-03-11 02:49:26 +00002145
/* Negotiates driver capability flags with firmware and records in
 * adapter->be3_native whether the BE3 native ERX API may be used.
 * Uses the bootstrap mailbox; process context only.
 */
int be_cmd_check_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));

	/* Advertise which capability bits we understand; request only the
	 * native ERX API.
	 */
	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		/* Firmware echoes back the capabilities it granted */
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}