blob: 3da47e0a4a1ffdfce7369980dca71c58cf1606a5 [file] [log] [blame]
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
7
8#ifndef _NET_DST_H
9#define _NET_DST_H
10
Alexey Dobriyan86393e52009-08-29 01:34:49 +000011#include <net/dst_ops.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020012#include <linux/netdevice.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/rtnetlink.h>
14#include <linux/rcupdate.h>
Paul Gortmaker187f1882011-11-23 20:12:59 -050015#include <linux/bug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/jiffies.h>
17#include <net/neighbour.h>
18#include <asm/processor.h>
19
/* Garbage-collector timing knobs, in jiffies.  NOTE(review): names suggest
 * minimum interval, backoff increment and maximum interval — confirm
 * against the gc implementation in net/core/dst.c.
 */
#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)
23
24/* Each dst_entry has reference count and sits in some parent list(s).
25 * When it is removed from parent list, it is "freed" (dst_free).
26 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
27 * is zero, it can be destroyed immediately, otherwise it is added
28 * to gc list and garbage collector periodically checks the refcnt.
29 */
30
31struct sk_buff;
32
/*
 * Protocol-independent destination cache entry.
 *
 * Field layout is performance-critical: __refcnt is deliberately kept on
 * a 64-byte boundary away from the read-mostly ops/input/output fields
 * (enforced by the BUILD_BUG_ON in dst_hold()).
 */
struct dst_entry {
	struct rcu_head		rcu_head;	/* used by dst_rcu_free() */
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;		/* protocol-specific operations */
	unsigned long		_metrics;	/* u32 metrics array ptr, low bit
						 * is DST_METRICS_READ_ONLY */
	union {
		unsigned long		expires;	/* 0 means "never expires" */
		/* point to where the dst_entry copied from */
		struct dst_entry	*from;
	};
	struct dst_entry	*path;
	void			*__pad0;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout stable without XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_NOPEER		0x0040
#define DST_FAKE_RTABLE		0x0080
#define DST_XFRM_TUNNEL		0x0100
#define DST_XFRM_QUEUE		0x0200

	/* set by dst_confirm(), consumed by dst_neigh_output() */
	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;		/* bumped by dst_use()/dst_use_noref() */
	unsigned long		lastuse;	/* jiffies of last use */
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
112
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

/* dst->_metrics packs a pointer to a u32 metrics array together with a
 * flag in the low bit marking the array as shared/read-only.
 */
#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
120
121static inline bool dst_metrics_read_only(const struct dst_entry *dst)
122{
123 return dst->_metrics & DST_METRICS_READ_ONLY;
124}
125
126extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
127
128static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
129{
130 unsigned long val = dst->_metrics;
131 if (!(val & DST_METRICS_READ_ONLY))
132 __dst_destroy_metrics_generic(dst, val);
133}
134
135static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
136{
137 unsigned long p = dst->_metrics;
138
Stephen Hemminger1f370702011-05-24 13:50:52 -0400139 BUG_ON(!p);
140
David S. Miller62fa8a82011-01-26 20:51:05 -0800141 if (p & DST_METRICS_READ_ONLY)
142 return dst->ops->cow_metrics(dst, p);
143 return __DST_METRICS_PTR(p);
144}
145
146/* This may only be invoked before the entry has reached global
147 * visibility.
148 */
149static inline void dst_init_metrics(struct dst_entry *dst,
150 const u32 *src_metrics,
151 bool read_only)
152{
153 dst->_metrics = ((unsigned long) src_metrics) |
154 (read_only ? DST_METRICS_READ_ONLY : 0);
155}
156
157static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
158{
159 u32 *dst_metrics = dst_metrics_write_ptr(dest);
160
161 if (dst_metrics) {
162 u32 *src_metrics = DST_METRICS_PTR(src);
163
164 memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
165 }
166}
167
/* Plain accessor for the metrics array of @dst (no copy-on-write). */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173static inline u32
David S. Miller5170ae82010-12-12 21:35:57 -0800174dst_metric_raw(const struct dst_entry *dst, const int metric)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175{
David S. Miller62fa8a82011-01-26 20:51:05 -0800176 u32 *p = DST_METRICS_PTR(dst);
177
178 return p[metric-1];
David S. Millerdefb3512010-12-08 21:16:57 -0800179}
180
David S. Miller5170ae82010-12-12 21:35:57 -0800181static inline u32
182dst_metric(const struct dst_entry *dst, const int metric)
183{
David S. Miller0dbaee32010-12-13 12:52:14 -0800184 WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
David S. Millerd33e4552010-12-14 13:01:14 -0800185 metric == RTAX_ADVMSS ||
186 metric == RTAX_MTU);
David S. Miller5170ae82010-12-12 21:35:57 -0800187 return dst_metric_raw(dst, metric);
188}
189
David S. Miller0dbaee32010-12-13 12:52:14 -0800190static inline u32
191dst_metric_advmss(const struct dst_entry *dst)
192{
193 u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
194
195 if (!advmss)
196 advmss = dst->ops->default_advmss(dst);
197
198 return advmss;
199}
200
David S. Millerdefb3512010-12-08 21:16:57 -0800201static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
202{
David S. Miller62fa8a82011-01-26 20:51:05 -0800203 u32 *p = dst_metrics_write_ptr(dst);
David S. Millerdefb3512010-12-08 21:16:57 -0800204
David S. Miller62fa8a82011-01-26 20:51:05 -0800205 if (p)
206 p[metric-1] = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207}
208
Gilad Ben-Yossef0c3adfb2009-10-28 04:15:23 +0000209static inline u32
210dst_feature(const struct dst_entry *dst, u32 feature)
211{
David S. Millerbb5b7c12009-12-15 20:56:42 -0800212 return dst_metric(dst, RTAX_FEATURES) & feature;
Gilad Ben-Yossef0c3adfb2009-10-28 04:15:23 +0000213}
214
/* Effective MTU of @dst, via the protocol-specific mtu operation. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}
219
Stephen Hemmingerc1e20f72008-07-18 23:02:15 -0700220/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
221static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
222{
223 return msecs_to_jiffies(dst_metric(dst, metric));
224}
225
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226static inline u32
227dst_allfrag(const struct dst_entry *dst)
228{
Gilad Ben-Yossef0c3adfb2009-10-28 04:15:23 +0000229 int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 return ret;
231}
232
233static inline int
David S. Millerd33e4552010-12-14 13:01:14 -0800234dst_metric_locked(const struct dst_entry *dst, int metric)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235{
236 return dst_metric(dst, RTAX_LOCK) & (1<<metric);
237}
238
/* Take a reference on @dst.
 *
 * The BUILD_BUG_ON enforces that __refcnt sits on a 64-byte boundary,
 * away from the read-mostly fields of struct dst_entry.
 */
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}
248
Pavel Emelyanov03f49f32007-11-10 21:28:34 -0800249static inline void dst_use(struct dst_entry *dst, unsigned long time)
250{
251 dst_hold(dst);
252 dst->__use++;
253 dst->lastuse = time;
254}
255
Eric Dumazet7fee2262010-05-11 23:19:48 +0000256static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
257{
258 dst->__use++;
259 dst->lastuse = time;
260}
261
Eldad Zack7f95e182012-06-16 15:14:49 +0200262static inline struct dst_entry *dst_clone(struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263{
264 if (dst)
265 atomic_inc(&dst->__refcnt);
266 return dst;
267}
268
Ilpo Järvinen8d330862008-03-27 17:53:31 -0700269extern void dst_release(struct dst_entry *dst);
Eric Dumazet7fee2262010-05-11 23:19:48 +0000270
271static inline void refdst_drop(unsigned long refdst)
272{
273 if (!(refdst & SKB_DST_NOREF))
274 dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
275}
276
277/**
278 * skb_dst_drop - drops skb dst
279 * @skb: buffer
280 *
281 * Drops dst reference count if a reference was taken.
282 */
Eric Dumazetadf30902009-06-02 05:19:30 +0000283static inline void skb_dst_drop(struct sk_buff *skb)
284{
Eric Dumazet7fee2262010-05-11 23:19:48 +0000285 if (skb->_skb_refdst) {
286 refdst_drop(skb->_skb_refdst);
287 skb->_skb_refdst = 0UL;
288 }
289}
290
291static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
292{
293 nskb->_skb_refdst = oskb->_skb_refdst;
294 if (!(nskb->_skb_refdst & SKB_DST_NOREF))
295 dst_clone(skb_dst(nskb));
296}
297
298/**
299 * skb_dst_force - makes sure skb dst is refcounted
300 * @skb: buffer
301 *
302 * If dst is not yet refcounted, let's do it
303 */
304static inline void skb_dst_force(struct sk_buff *skb)
305{
306 if (skb_dst_is_noref(skb)) {
307 WARN_ON(!rcu_read_lock_held());
308 skb->_skb_refdst &= ~SKB_DST_NOREF;
309 dst_clone(skb_dst(skb));
310 }
Eric Dumazetadf30902009-06-02 05:19:30 +0000311}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700313
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* the packet now "arrives" on the tunnel device */
	skb->dev = dev;

	/*
	 * Clear rxhash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	if (!skb->l4_rxhash)
		skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);	/* reset queue mapping for the inner packet */
	skb_dst_drop(skb);		/* outer route no longer applies */
	nf_reset(skb);			/* forget netfilter state from the outer path */
}
337
338/**
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700339 * skb_tunnel_rx - prepare skb for rx reinsert
340 * @skb: buffer
341 * @dev: tunnel device
342 *
343 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
344 * so make some cleanups, and perform accounting.
Eric Dumazet290b8952010-09-27 00:33:35 +0000345 * Note: this accounting is not SMP safe.
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700346 */
347static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
348{
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700349 /* TODO : stats should be SMP safe */
350 dev->stats.rx_packets++;
351 dev->stats.rx_bytes += skb->len;
Eric Dumazet290b8952010-09-27 00:33:35 +0000352 __skb_tunnel_rx(skb, dev);
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700353}
354
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355/* Children define the path of the packet through the
356 * Linux networking. Thus, destinations are stackable.
357 */
358
Steffen Klassert8764ab22010-06-04 01:57:38 +0000359static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360{
Steffen Klasserte4334302011-03-15 21:09:32 +0000361 struct dst_entry *child = dst_clone(skb_dst(skb)->child);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362
Steffen Klassert8764ab22010-06-04 01:57:38 +0000363 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364 return child;
365}
366
Herbert Xu352e5122007-11-13 21:34:06 -0800367extern int dst_discard(struct sk_buff *skb);
Eldad Zack7f95e182012-06-16 15:14:49 +0200368extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
David S. Miller5110effe2012-07-02 02:21:03 -0700369 int initial_ref, int initial_obsolete,
370 unsigned short flags);
Eldad Zack7f95e182012-06-16 15:14:49 +0200371extern void __dst_free(struct dst_entry *dst);
372extern struct dst_entry *dst_destroy(struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
Eldad Zack7f95e182012-06-16 15:14:49 +0200374static inline void dst_free(struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375{
David S. Millerf5b0a872012-07-19 12:31:33 -0700376 if (dst->obsolete > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 return;
378 if (!atomic_read(&dst->__refcnt)) {
379 dst = dst_destroy(dst);
380 if (!dst)
381 return;
382 }
383 __dst_free(dst);
384}
385
386static inline void dst_rcu_free(struct rcu_head *head)
387{
388 struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
389 dst_free(dst);
390}
391
/* Mark @dst so the next transmit confirms the neighbour entry
 * (flag is consumed by dst_neigh_output()).
 */
static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000396
/* Transmit @skb through neighbour @n attached to @dst.
 *
 * A pending confirmation on the dst is propagated to the neighbour
 * timestamp first; n->confirmed is written only when the value actually
 * changes, to avoid dirtying a shared cache line.  When the neighbour is
 * in a CONNECTED state and has a cached hardware header, the fast
 * neigh_hh_output() path is used; otherwise the neighbour's own output
 * method handles the packet.
 */
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}
417
/* Resolve the neighbour entry for destination address @daddr using the
 * protocol-specific lookup operation of @dst.
 */
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	return dst->ops->neigh_lookup(dst, NULL, daddr);
}
422
/* Resolve the neighbour entry for @skb, letting the protocol-specific
 * lookup operation extract the destination address from the packet.
 */
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	return dst->ops->neigh_lookup(dst, skb, NULL);
}
428
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429static inline void dst_link_failure(struct sk_buff *skb)
430{
Eric Dumazetadf30902009-06-02 05:19:30 +0000431 struct dst_entry *dst = skb_dst(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 if (dst && dst->ops && dst->ops->link_failure)
433 dst->ops->link_failure(skb);
434}
435
436static inline void dst_set_expires(struct dst_entry *dst, int timeout)
437{
438 unsigned long expires = jiffies + timeout;
439
440 if (expires == 0)
441 expires = 1;
442
443 if (dst->expires == 0 || time_before(expires, dst->expires))
444 dst->expires = expires;
445}
446
/* Output packet to network from transport, via the dst's output hook. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}
452
/* Input packet from network to transport, via the dst's input hook. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
458
459static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
460{
461 if (dst->obsolete)
462 dst = dst->ops->check(dst, cookie);
463 return dst;
464}
465
466extern void dst_init(void);
467
Herbert Xu815f4e52007-12-12 10:36:59 -0800468/* Flags for xfrm_lookup flags argument. */
469enum {
David S. Miller80c0bc92011-03-01 14:36:37 -0800470 XFRM_LOOKUP_ICMP = 1 << 0,
Herbert Xu815f4e52007-12-12 10:36:59 -0800471};
472
struct flowi;
#ifndef CONFIG_XFRM
/* Without CONFIG_XFRM there are no transformations to apply: the
 * original route is returned unchanged.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487
488#endif /* _NET_DST_H */