/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
        u8 raw[16];
        struct {
                __be64 subnet_prefix;
                __be64 interface_id;
        } global;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR = 1,
        IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
        IB_DEVICE_RAW_MULTI = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT = (1<<8),
        IB_DEVICE_INIT_TYPE = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
        IB_DEVICE_SRQ_RESIZE = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ = (1<<14),
        IB_DEVICE_ZERO_STAG = (1<<15),
        IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages. Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM = (1<<18),
        IB_DEVICE_UD_TSO = (1<<19),
        IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
};
108
109enum ib_atomic_cap {
110 IB_ATOMIC_NONE,
111 IB_ATOMIC_HCA,
112 IB_ATOMIC_GLOB
113};
114
115struct ib_device_attr {
116 u64 fw_ver;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700117 __be64 sys_image_guid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 u64 max_mr_size;
119 u64 page_size_cap;
120 u32 vendor_id;
121 u32 vendor_part_id;
122 u32 hw_ver;
123 int max_qp;
124 int max_qp_wr;
125 int device_cap_flags;
126 int max_sge;
127 int max_sge_rd;
128 int max_cq;
129 int max_cqe;
130 int max_mr;
131 int max_pd;
132 int max_qp_rd_atom;
133 int max_ee_rd_atom;
134 int max_res_rd_atom;
135 int max_qp_init_rd_atom;
136 int max_ee_init_rd_atom;
137 enum ib_atomic_cap atomic_cap;
138 int max_ee;
139 int max_rdd;
140 int max_mw;
141 int max_raw_ipv6_qp;
142 int max_raw_ethy_qp;
143 int max_mcast_grp;
144 int max_mcast_qp_attach;
145 int max_total_mcast_qp_attach;
146 int max_ah;
147 int max_fmr;
148 int max_map_per_fmr;
149 int max_srq;
150 int max_srq_wr;
151 int max_srq_sge;
Steve Wise00f7ec32008-07-14 23:48:45 -0700152 unsigned int max_fast_reg_page_list_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 u16 max_pkeys;
154 u8 local_ca_ack_delay;
155};
156
157enum ib_mtu {
158 IB_MTU_256 = 1,
159 IB_MTU_512 = 2,
160 IB_MTU_1024 = 3,
161 IB_MTU_2048 = 4,
162 IB_MTU_4096 = 5
163};
164
165static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
166{
167 switch (mtu) {
168 case IB_MTU_256: return 256;
169 case IB_MTU_512: return 512;
170 case IB_MTU_1024: return 1024;
171 case IB_MTU_2048: return 2048;
172 case IB_MTU_4096: return 4096;
173 default: return -1;
174 }
175}

enum ib_port_state {
        IB_PORT_NOP = 0,
        IB_PORT_DOWN = 1,
        IB_PORT_INIT = 2,
        IB_PORT_ARMED = 3,
        IB_PORT_ACTIVE = 4,
        IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM = 1 << 1,
        IB_PORT_NOTICE_SUP = 1 << 2,
        IB_PORT_TRAP_SUP = 1 << 3,
        IB_PORT_OPT_IPD_SUP = 1 << 4,
        IB_PORT_AUTO_MIGR_SUP = 1 << 5,
        IB_PORT_SL_MAP_SUP = 1 << 6,
        IB_PORT_MKEY_NVRAM = 1 << 7,
        IB_PORT_PKEY_NVRAM = 1 << 8,
        IB_PORT_LED_INFO_SUP = 1 << 9,
        IB_PORT_SM_DISABLED = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
        IB_PORT_CM_SUP = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
        IB_PORT_REINIT_SUP = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
        IB_PORT_DR_NOTICE_SUP = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP = 1 << 24,
        IB_PORT_CLIENT_REG_SUP = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X = 1,
        IB_WIDTH_4X = 2,
        IB_WIDTH_8X = 4,
        IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return 1;
        case IB_WIDTH_4X:  return 4;
        case IB_WIDTH_8X:  return 8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64 ipInReceives;
        u64 ipInHdrErrors;
        u64 ipInTooBigErrors;
        u64 ipInNoRoutes;
        u64 ipInAddrErrors;
        u64 ipInUnknownProtos;
        u64 ipInTruncatedPkts;
        u64 ipInDiscards;
        u64 ipInDelivers;
        u64 ipOutForwDatagrams;
        u64 ipOutRequests;
        u64 ipOutDiscards;
        u64 ipOutNoRoutes;
        u64 ipReasmTimeout;
        u64 ipReasmReqds;
        u64 ipReasmOKs;
        u64 ipReasmFails;
        u64 ipFragOKs;
        u64 ipFragFails;
        u64 ipFragCreates;
        u64 ipInMcastPkts;
        u64 ipOutMcastPkts;
        u64 ipInBcastPkts;
        u64 ipOutBcastPkts;

        u64 tcpRtoAlgorithm;
        u64 tcpRtoMin;
        u64 tcpRtoMax;
        u64 tcpMaxConn;
        u64 tcpActiveOpens;
        u64 tcpPassiveOpens;
        u64 tcpAttemptFails;
        u64 tcpEstabResets;
        u64 tcpCurrEstab;
        u64 tcpInSegs;
        u64 tcpOutSegs;
        u64 tcpRetransSegs;
        u64 tcpInErrs;
        u64 tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats ib;
        struct iw_protocol_stats iw;
};

struct ib_port_attr {
        enum ib_port_state state;
        enum ib_mtu max_mtu;
        enum ib_mtu active_mtu;
        int gid_tbl_len;
        u32 port_cap_flags;
        u32 max_msg_sz;
        u32 bad_pkey_cntr;
        u32 qkey_viol_cntr;
        u16 pkey_tbl_len;
        u16 lid;
        u16 sm_lid;
        u8 lmc;
        u8 max_vl_num;
        u8 sm_sl;
        u8 subnet_timeout;
        u8 init_type_reply;
        u8 active_width;
        u8 active_speed;
        u8 phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

struct ib_device_modify {
        u64 sys_image_guid;
        char node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN = 1,
        IB_PORT_INIT_TYPE = (1<<2),
        IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
        u32 set_port_cap_mask;
        u32 clr_port_cap_mask;
        u8 init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
        struct ib_device *device;
        union {
                struct ib_cq *cq;
                struct ib_qp *qp;
                struct ib_srq *srq;
                u8 port_num;
        } element;
        enum ib_event_type event;
};

struct ib_event_handler {
        struct ib_device *device;
        void (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
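
/*
 * Example (editor's illustrative sketch, not part of this header): a
 * client can hook asynchronous device events with the macro above and
 * ib_register_event_handler() declared later in this file.  The handler
 * name and the "ibdev" pointer are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "port %d is active\n",
 *			       event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler ev_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&ev_handler, ibdev, my_event_handler);
 *	ib_register_event_handler(&ev_handler);
 */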

struct ib_global_route {
        union ib_gid dgid;
        u32 flow_label;
        u8 sgid_index;
        u8 hop_limit;
        u8 traffic_class;
};

struct ib_grh {
        __be32 version_tclass_flow;
        __be16 paylen;
        u8 next_hdr;
        u8 hop_limit;
        union ib_gid sgid;
        union ib_gid dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS = 5,
        IB_RATE_10_GBPS = 3,
        IB_RATE_20_GBPS = 6,
        IB_RATE_30_GBPS = 4,
        IB_RATE_40_GBPS = 7,
        IB_RATE_60_GBPS = 8,
        IB_RATE_80_GBPS = 9,
        IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
        struct ib_global_route grh;
        u16 dlid;
        u8 sl;
        u8 src_path_bits;
        u8 static_rate;
        u8 ah_flags;
        u8 port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH = 1,
        IB_WC_WITH_IMM = (1<<1),
        IB_WC_WITH_INVALIDATE = (1<<2),
};

struct ib_wc {
        u64 wr_id;
        enum ib_wc_status status;
        enum ib_wc_opcode opcode;
        u32 vendor_err;
        u32 byte_len;
        struct ib_qp *qp;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        u32 src_qp;
        int wc_flags;
        u16 pkey_index;
        u16 slid;
        u8 sl;
        u8 dlid_path_bits;
        u8 port_num;    /* valid only for DR SMPs on switches */
        int csum_ok;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED = 1 << 0,
        IB_CQ_NEXT_COMP = 1 << 1,
        IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR = 1 << 0,
        IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
        u32 max_wr;
        u32 max_sge;
        u32 srq_limit;
};

struct ib_srq_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        struct ib_srq_attr attr;
};

struct ib_qp_cap {
        u32 max_send_wr;
        u32 max_recv_wr;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETY
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
};

struct ib_qp_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_qp_cap cap;
        enum ib_sig_type sq_sig_type;
        enum ib_qp_type qp_type;
        enum ib_qp_create_flags create_flags;
        u8 port_num;    /* special QP types only */
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 = 0,
        IB_RNR_TIMER_000_01 = 1,
        IB_RNR_TIMER_000_02 = 2,
        IB_RNR_TIMER_000_03 = 3,
        IB_RNR_TIMER_000_04 = 4,
        IB_RNR_TIMER_000_06 = 5,
        IB_RNR_TIMER_000_08 = 6,
        IB_RNR_TIMER_000_12 = 7,
        IB_RNR_TIMER_000_16 = 8,
        IB_RNR_TIMER_000_24 = 9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE = 1,
        IB_QP_CUR_STATE = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
        IB_QP_ACCESS_FLAGS = (1<<3),
        IB_QP_PKEY_INDEX = (1<<4),
        IB_QP_PORT = (1<<5),
        IB_QP_QKEY = (1<<6),
        IB_QP_AV = (1<<7),
        IB_QP_PATH_MTU = (1<<8),
        IB_QP_TIMEOUT = (1<<9),
        IB_QP_RETRY_CNT = (1<<10),
        IB_QP_RNR_RETRY = (1<<11),
        IB_QP_RQ_PSN = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
        IB_QP_ALT_PATH = (1<<14),
        IB_QP_MIN_RNR_TIMER = (1<<15),
        IB_QP_SQ_PSN = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
        IB_QP_PATH_MIG_STATE = (1<<18),
        IB_QP_CAP = (1<<19),
        IB_QP_DEST_QPN = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state qp_state;
        enum ib_qp_state cur_qp_state;
        enum ib_mtu path_mtu;
        enum ib_mig_state path_mig_state;
        u32 qkey;
        u32 rq_psn;
        u32 sq_psn;
        u32 dest_qp_num;
        int qp_access_flags;
        struct ib_qp_cap cap;
        struct ib_ah_attr ah_attr;
        struct ib_ah_attr alt_ah_attr;
        u16 pkey_index;
        u16 alt_pkey_index;
        u8 en_sqd_async_notify;
        u8 sq_draining;
        u8 max_rd_atomic;
        u8 max_dest_rd_atomic;
        u8 min_rnr_timer;
        u8 port_num;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u8 alt_port_num;
        u8 alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
};

enum ib_send_flags {
        IB_SEND_FENCE = 1,
        IB_SEND_SIGNALED = (1<<1),
        IB_SEND_SOLICITED = (1<<2),
        IB_SEND_INLINE = (1<<3),
        IB_SEND_IP_CSUM = (1<<4)
};

struct ib_sge {
        u64 addr;
        u32 length;
        u32 lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device *device;
        u64 *page_list;
        unsigned int max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
        enum ib_wr_opcode opcode;
        int send_flags;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        union {
                struct {
                        u64 remote_addr;
                        u32 rkey;
                } rdma;
                struct {
                        u64 remote_addr;
                        u64 compare_add;
                        u64 swap;
                        u32 rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void *header;
                        int hlen;
                        int mss;
                        u32 remote_qpn;
                        u32 remote_qkey;
                        u16 pkey_index; /* valid for GSI only */
                        u8 port_num;    /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64 iova_start;
                        struct ib_fast_reg_page_list *page_list;
                        unsigned int page_shift;
                        unsigned int page_list_len;
                        u32 length;
                        int access_flags;
                        u32 rkey;
                } fast_reg;
        } wr;
};
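
/*
 * Example (editor's illustrative sketch): building a signaled RDMA WRITE
 * work request on a connected QP and posting it with ib_post_send(),
 * declared later in this file.  "qp", "dma_addr", "mr", "remote_va" and
 * "rkey" are hypothetical; the remote address and rkey would come from
 * connection establishment.
 *
 *	struct ib_send_wr wr, *bad_wr;
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,	// from ib_dma_map_single()
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *
 *	memset(&wr, 0, sizeof wr);
 *	wr.wr_id               = MY_WRITE_WRID;	// hypothetical cookie
 *	wr.sg_list             = &sge;
 *	wr.num_sge             = 1;
 *	wr.opcode              = IB_WR_RDMA_WRITE;
 *	wr.send_flags          = IB_SEND_SIGNALED;
 *	wr.wr.rdma.remote_addr = remote_va;
 *	wr.wr.rdma.rkey        = rkey;
 *
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		printk(KERN_ERR "post_send failed\n");	// bad_wr points
 *							// at the bad WR
 */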

struct ib_recv_wr {
        struct ib_recv_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE = 1,
        IB_ACCESS_REMOTE_WRITE = (1<<1),
        IB_ACCESS_REMOTE_READ = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND = (1<<4)
};

struct ib_phys_buf {
        u64 addr;
        u64 size;
};

struct ib_mr_attr {
        struct ib_pd *pd;
        u64 device_virt_addr;
        u64 size;
        int mr_access_flags;
        u32 lkey;
        u32 rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS = 1,
        IB_MR_REREG_PD = (1<<1),
        IB_MR_REREG_ACCESS = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr *mr;
        u64 wr_id;
        u64 addr;
        u32 length;
        int send_flags;
        int mw_access_flags;
};

struct ib_fmr_attr {
        int max_pages;
        int max_maps;
        u8 page_shift;
};

struct ib_ucontext {
        struct ib_device *device;
        struct list_head pd_list;
        struct list_head mr_list;
        struct list_head mw_list;
        struct list_head cq_list;
        struct list_head qp_list;
        struct list_head srq_list;
        struct list_head ah_list;
        int closing;
};

struct ib_uobject {
        u64 user_handle;                /* handle given to us by userspace */
        struct ib_ucontext *context;    /* associated user context */
        void *object;                   /* containing object */
        struct list_head list;          /* link to context's list */
        int id;                         /* index into kernel idr */
        struct kref ref;
        struct rw_semaphore mutex;      /* protects .live */
        int live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t inlen;
        size_t outlen;
};

struct ib_pd {
        struct ib_device *device;
        struct ib_uobject *uobject;
        atomic_t usecnt;        /* count all resources */
};

struct ib_ah {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device *device;
        struct ib_uobject *uobject;
        ib_comp_handler comp_handler;
        void (*event_handler)(struct ib_event *, void *);
        void *cq_context;
        int cqe;
        atomic_t usecnt;        /* count number of work queues */
};

struct ib_srq {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        atomic_t usecnt;
};

struct ib_qp {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        u32 qp_num;
        enum ib_qp_type qp_type;
};

struct ib_mr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 lkey;
        u32 rkey;
        atomic_t usecnt;        /* count number of MWs */
};

struct ib_mw {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 rkey;
};

struct ib_fmr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct list_head list;
        u32 lkey;
        u32 rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY = 1,
        IB_MAD_IGNORE_BKEY = 2,
        IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache **pkey_cache;
        struct ib_gid_cache **gid_cache;
        u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
        int (*mapping_error)(struct ib_device *dev,
                             u64 dma_addr);
        u64 (*map_single)(struct ib_device *dev,
                          void *ptr, size_t size,
                          enum dma_data_direction direction);
        void (*unmap_single)(struct ib_device *dev,
                             u64 addr, size_t size,
                             enum dma_data_direction direction);
        u64 (*map_page)(struct ib_device *dev,
                        struct page *page, unsigned long offset,
                        size_t size,
                        enum dma_data_direction direction);
        void (*unmap_page)(struct ib_device *dev,
                           u64 addr, size_t size,
                           enum dma_data_direction direction);
        int (*map_sg)(struct ib_device *dev,
                      struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);
        void (*unmap_sg)(struct ib_device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction direction);
        u64 (*dma_address)(struct ib_device *dev,
                           struct scatterlist *sg);
        unsigned int (*dma_len)(struct ib_device *dev,
                                struct scatterlist *sg);
        void (*sync_single_for_cpu)(struct ib_device *dev,
                                    u64 dma_handle,
                                    size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct ib_device *dev,
                                       u64 dma_handle,
                                       size_t size,
                                       enum dma_data_direction dir);
        void *(*alloc_coherent)(struct ib_device *dev,
                                size_t size,
                                u64 *dma_handle,
                                gfp_t flag);
        void (*free_coherent)(struct ib_device *dev,
                              size_t size, void *cpu_addr,
                              u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
        struct device *dma_device;

        char name[IB_DEVICE_NAME_MAX];

        struct list_head event_handler_list;
        spinlock_t event_handler_lock;

        struct list_head core_list;
        struct list_head client_data_list;
        spinlock_t client_data_lock;

        struct ib_cache cache;
        int *pkey_tbl_len;
        int *gid_tbl_len;

        int num_comp_vectors;

        struct iw_cm_verbs *iwcm;

        int (*get_protocol_stats)(struct ib_device *device,
                                  union rdma_protocol_stats *stats);
        int (*query_device)(struct ib_device *device,
                            struct ib_device_attr *device_attr);
        int (*query_port)(struct ib_device *device,
                          u8 port_num,
                          struct ib_port_attr *port_attr);
        int (*query_gid)(struct ib_device *device,
                         u8 port_num, int index,
                         union ib_gid *gid);
        int (*query_pkey)(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey);
        int (*modify_device)(struct ib_device *device,
                             int device_modify_mask,
                             struct ib_device_modify *device_modify);
        int (*modify_port)(struct ib_device *device,
                           u8 port_num, int port_modify_mask,
                           struct ib_port_modify *port_modify);
        struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
                                              struct ib_udata *udata);
        int (*dealloc_ucontext)(struct ib_ucontext *context);
        int (*mmap)(struct ib_ucontext *context,
                    struct vm_area_struct *vma);
        struct ib_pd *(*alloc_pd)(struct ib_device *device,
                                  struct ib_ucontext *context,
                                  struct ib_udata *udata);
        int (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *(*create_ah)(struct ib_pd *pd,
                                   struct ib_ah_attr *ah_attr);
        int (*modify_ah)(struct ib_ah *ah,
                         struct ib_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah,
                        struct ib_ah_attr *ah_attr);
        int (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *(*create_srq)(struct ib_pd *pd,
                                     struct ib_srq_init_attr *srq_init_attr,
                                     struct ib_udata *udata);
        int (*modify_srq)(struct ib_srq *srq,
                          struct ib_srq_attr *srq_attr,
                          enum ib_srq_attr_mask srq_attr_mask,
                          struct ib_udata *udata);
        int (*query_srq)(struct ib_srq *srq,
                         struct ib_srq_attr *srq_attr);
        int (*destroy_srq)(struct ib_srq *srq);
        int (*post_srq_recv)(struct ib_srq *srq,
                             struct ib_recv_wr *recv_wr,
                             struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *(*create_qp)(struct ib_pd *pd,
                                   struct ib_qp_init_attr *qp_init_attr,
                                   struct ib_udata *udata);
        int (*modify_qp)(struct ib_qp *qp,
                         struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_udata *udata);
        int (*query_qp)(struct ib_qp *qp,
                        struct ib_qp_attr *qp_attr,
                        int qp_attr_mask,
                        struct ib_qp_init_attr *qp_init_attr);
        int (*destroy_qp)(struct ib_qp *qp);
        int (*post_send)(struct ib_qp *qp,
                         struct ib_send_wr *send_wr,
                         struct ib_send_wr **bad_send_wr);
        int (*post_recv)(struct ib_qp *qp,
                         struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
                                   int comp_vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata);
        int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                         u16 cq_period);
        int (*destroy_cq)(struct ib_cq *cq);
        int (*resize_cq)(struct ib_cq *cq, int cqe,
                         struct ib_udata *udata);
        int (*poll_cq)(struct ib_cq *cq, int num_entries,
                       struct ib_wc *wc);
        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int (*req_notify_cq)(struct ib_cq *cq,
                             enum ib_cq_notify_flags flags);
        int (*req_ncomp_notif)(struct ib_cq *cq,
                               int wc_cnt);
        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
                                    int mr_access_flags);
        struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
                                     struct ib_phys_buf *phys_buf_array,
                                     int num_phys_buf,
                                     int mr_access_flags,
                                     u64 *iova_start);
        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
                                     u64 start, u64 length,
                                     u64 virt_addr,
                                     int mr_access_flags,
                                     struct ib_udata *udata);
        int (*query_mr)(struct ib_mr *mr,
                        struct ib_mr_attr *mr_attr);
        int (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *(*alloc_fast_reg_mr)(struct ib_pd *pd,
                                           int max_page_list_len);
        struct ib_fast_reg_page_list *(*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                  int page_list_len);
        void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int (*rereg_phys_mr)(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
        struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
        int (*bind_mw)(struct ib_qp *qp,
                       struct ib_mw *mw,
                       struct ib_mw_bind *mw_bind);
        int (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
                                    int mr_access_flags,
                                    struct ib_fmr_attr *fmr_attr);
        int (*map_phys_fmr)(struct ib_fmr *fmr,
                            u64 *page_list, int list_len,
                            u64 iova);
        int (*unmap_fmr)(struct list_head *fmr_list);
        int (*dealloc_fmr)(struct ib_fmr *fmr);
        int (*attach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*detach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*process_mad)(struct ib_device *device,
                           int process_mad_flags,
                           u8 port_num,
                           struct ib_wc *in_wc,
                           struct ib_grh *in_grh,
                           struct ib_mad *in_mad,
                           struct ib_mad *out_mad);

        struct ib_dma_mapping_ops *dma_ops;

        struct module *owner;
        struct device dev;
        struct kobject *ports_parent;
        struct list_head port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        } reg_state;

        u64 uverbs_cmd_mask;
        int uverbs_abi_ver;

        char node_desc[64];
        __be64 node_guid;
        u8 node_type;
        u8 phys_port_cnt;
};

struct ib_client {
        char *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
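
/*
 * Example (editor's illustrative sketch): a kernel client typically
 * allocates one PD per device it uses.  "ibdev" is hypothetical, and
 * the IS_ERR()/PTR_ERR() convention is assumed from the usual kernel
 * error-pointer idiom for these verbs.
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create CQs, QPs and MRs against this PD ...
 *	ib_dealloc_pd(pd);
 */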

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 * work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 * handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 * address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 * handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 * SRQ. If SRQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 * the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 * are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP. If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);
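
/*
 * Example (editor's illustrative sketch): creating an RC QP.  "pd",
 * "send_cq" and "recv_cq" are hypothetical objects obtained earlier
 * from ib_alloc_pd() and ib_create_cq(); the queue depths shown are
 * arbitrary.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap         = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */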

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);
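
/*
 * Example (editor's illustrative sketch): the RESET->INIT transition of
 * a newly created RC QP.  The pkey index, port number and access flags
 * shown are hypothetical, fabric-specific values.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * The subsequent INIT->RTR and RTR->RTS transitions follow the same
 * pattern, with the attribute masks the IB spec requires for each
 * transition (see ib_modify_qp_is_ok() above).
 */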

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
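
/*
 * Example (editor's illustrative sketch): replenishing the receive queue
 * with a single-SGE buffer.  "dma_addr" would come from
 * ib_dma_map_single(), "mr" from ib_get_dma_mr(), and RECV_BUF_SIZE and
 * "buf" are hypothetical.
 *
 *	struct ib_recv_wr wr, *bad_wr;
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = RECV_BUF_SIZE,
 *		.lkey   = mr->lkey,
 *	};
 *
 *	memset(&wr, 0, sizeof wr);
 *	wr.wr_id   = (u64) (unsigned long) buf;	// cookie matched at poll time
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */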

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 * Must be >= 0 and < device->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);
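
/*
 * Example (editor's illustrative sketch): creating a 128-entry CQ on
 * completion vector 0 and arming it for the first notification.
 * "ibdev", "my_cq_handler" and "ctx" are hypothetical.
 *
 *	struct ib_cq *cq = ib_create_cq(ibdev, my_cq_handler, NULL,
 *					ctx, 128, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */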

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}
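
/*
 * Example (editor's illustrative sketch): draining a CQ one entry at a
 * time.  "handle_recv" is a hypothetical helper.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS) {
 *			printk(KERN_ERR "WR %llu failed with status %d\n",
 *			       (unsigned long long) wc.wr_id, wc.status);
 *			continue;
 *		}
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_recv(&wc);
 *	}
 */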

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
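
/*
 * Example (editor's illustrative sketch): the race-free "poll until
 * empty, then re-arm" pattern built on the return-value semantics
 * documented above.  "handle_completion" is a hypothetical helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */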

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 * usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
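
/*
 * Example (editor's illustrative sketch): mapping a kernel buffer for
 * DMA and describing it to the HCA through an SGE.  "ibdev", "buf" and
 * "mr" (from ib_get_dma_mr()) are hypothetical.
 *
 *	u64 dma_addr = ib_dma_map_single(ibdev, buf, size, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ibdev, dma_addr))
 *		return -ENOMEM;
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = size,
 *		.lkey   = mr->lkey,
 *	};
 */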
1608
1609/**
1610 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1611 * @dev: The device for which the DMA address was created
1612 * @addr: The DMA address
1613 * @size: The size of the region in bytes
1614 * @direction: The direction of the DMA
1615 */
1616static inline void ib_dma_unmap_single(struct ib_device *dev,
1617 u64 addr, size_t size,
1618 enum dma_data_direction direction)
1619{
Ben Collinsd1998ef2006-12-13 22:10:05 -05001620 if (dev->dma_ops)
1621 dev->dma_ops->unmap_single(dev, addr, size, direction);
1622 else
Ralph Campbell9b513092006-12-12 14:27:41 -08001623 dma_unmap_single(dev->dma_device, addr, size, direction);
1624}
1625
/**
 * ib_dma_map_single_attrs - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: Optional DMA attributes for the mapping
 */
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

/**
 * ib_dma_unmap_single_attrs - Destroy a mapping created by
 *   ib_dma_map_single_attrs()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes the region was mapped with
 */
static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	dma_unmap_single_attrs(dev->dma_device, addr, size,
			       direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_map_sg_attrs - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: Optional DMA attributes for the mapping
 */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_dma_unmap_sg_attrs - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes the list was mapped with
 */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

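/*
 * Example (illustrative sketch, not part of the verbs API): map a
 * scatterlist and walk the mapped entries through the ib_sg_dma_*()
 * accessors rather than reading the scatterlist fields directly, since
 * a device-private dma_ops may keep the DMA address elsewhere.  The
 * function name is hypothetical.
 */
static inline u64 ib_example_mapped_sg_bytes(struct ib_device *dev,
					     struct scatterlist *sgl,
					     int nents)
{
	struct scatterlist *sg;
	u64 total = 0;
	int mapped, i;

	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return 0;

	for_each_sg(sgl, sg, mapped, i)
		total += ib_sg_dma_len(dev, sg);

	ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return total;
}
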
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

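/*
 * Example (illustrative sketch, not part of the verbs API): a receive
 * buffer that stays mapped across many transfers is handed back and
 * forth with the sync calls instead of being remapped each time.  The
 * function name is hypothetical.
 */
static inline void ib_example_recycle_recv_buf(struct ib_device *dev,
					       u64 addr, size_t size)
{
	/* Ownership to the CPU: safe to read what the device DMA'd in. */
	ib_dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);

	/* ... copy or inspect the received data here ... */

	/* Ownership back to the device for the next receive. */
	ib_dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
}
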
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

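/*
 * Example (illustrative sketch, not part of the verbs API): allocate a
 * coherent descriptor ring; coherent memory needs no sync calls, at the
 * cost of uncached access on some platforms.  The ring layout and the
 * function name are hypothetical.
 */
static inline void *ib_example_alloc_ring(struct ib_device *dev,
					  size_t entry_size, int nentries,
					  u64 *dma_handle)
{
	return ib_dma_alloc_coherent(dev, entry_size * nentries,
				     dma_handle, GFP_KERNEL);
}
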
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregistration of the memory
 *   region followed by a registration of the physical memory region.
 *   Where possible, resources are reused instead of deallocated and
 *   reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * work request completes (reported as an IB_WC_FAST_REG_MR completion).
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
				struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - Updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

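/*
 * Example (illustrative sketch, not part of the verbs API): allocate
 * fast registration resources and advance the 8-bit key before each
 * reuse of the MR, so an rkey from a previous registration cannot be
 * replayed.  Error handling is abbreviated; the names and the page
 * list length of 32 are hypothetical.
 */
static inline int ib_example_fast_reg_setup(struct ib_pd *pd,
					    struct ib_mr **mr,
					    struct ib_fast_reg_page_list **pl,
					    u8 key)
{
	*mr = ib_alloc_fast_reg_mr(pd, 32);
	if (IS_ERR(*mr))
		return PTR_ERR(*mr);

	*pl = ib_alloc_fast_reg_page_list(pd->device, 32);
	if (IS_ERR(*pl)) {
		ib_dereg_mr(*mr);
		return PTR_ERR(*pl);
	}

	/* New key goes into the low byte of both lkey and rkey. */
	ib_update_fast_reg_key(*mr, key);
	return 0;
}
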
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

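/*
 * Example (illustrative sketch, not part of the verbs API): allocate an
 * FMR sized for a small page array and map it at a chosen I/O virtual
 * address.  The attribute values and the function name are
 * hypothetical.
 */
static inline struct ib_fmr *ib_example_fmr_map(struct ib_pd *pd,
						u64 *pages, int npages,
						u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = npages,
		.max_maps   = 32,
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ, &attr);
	if (IS_ERR(fmr))
		return fmr;

	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (ret) {
		ib_dealloc_fmr(fmr);
		return ERR_PTR(ret);
	}
	return fmr;
}
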
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

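/*
 * Example (illustrative sketch, not part of the verbs API): bracket a
 * burst of multicast traffic on a UD QP with attach/detach.  In real
 * code the GID and LID come from a subnet administration multicast
 * join; the function name is hypothetical.
 */
static inline int ib_example_mcast_burst(struct ib_qp *qp,
					 union ib_gid *mgid, u16 mlid)
{
	int ret = ib_attach_mcast(qp, mgid, mlid);

	if (ret)
		return ret;

	/* ... post sends/receives to the multicast group here ... */

	return ib_detach_mcast(qp, mgid, mlid);
}
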
#endif /* IB_VERBS_H */