#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
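
/* One struct netns_frags exists per network namespace and per protocol
 * (IPv4, IPv6, netfilter).  "mem" accounts the truesize of all queued
 * fragments (plus the queue structures themselves), the thresholds bound
 * that memory (the evictor kicks in above high_thresh and reaps until
 * usage drops under low_thresh), and "timeout" limits how long an
 * incomplete queue may stay around.
 */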

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};
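
/* Reassembly is normally considered complete once both INET_FRAG_FIRST_IN
 * and INET_FRAG_LAST_IN have been seen and "meat" (the number of payload
 * bytes collected so far) equals "len".  Illustrative check, roughly what
 * the protocol handlers do ("reassemble" is a placeholder):
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		reassemble(q);
 */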

#define INETFRAGS_HASHSZ		64

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
						void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
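
/* Each protocol embeds struct inet_frag_queue at the start of its own
 * queue type, fills in this descriptor once and registers it at init
 * time.  A minimal sketch, with hypothetical names (loosely modelled on
 * the IPv4 reassembly setup):
 *
 *	static struct inet_frags my_frags;
 *
 *	static int __init my_frag_init(void)
 *	{
 *		my_frags.hashfn		= my_hashfn;
 *		my_frags.match		= my_match;
 *		my_frags.constructor	= my_constructor;
 *		my_frags.destructor	= my_destructor;
 *		my_frags.skb_free	= NULL;
 *		my_frags.qsize		= sizeof(struct my_frag_queue);
 *		my_frags.frag_expire	= my_expire;
 *		my_frags.secret_interval = 10 * 60 * HZ;
 *		inet_frags_init(&my_frags);
 *		return 0;
 *	}
 */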

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}
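
/* inet_frag_find() returns the queue with its refcnt already elevated;
 * users drop that reference with inet_frag_put() when they are done.
 * Minimal sketch (locking and error handling elided):
 *
 *	q = inet_frag_find(nf, f, &key, hash);
 *	if (q) {
 *		... queue or reassemble the fragment under q->lock ...
 *		inet_frag_put(q, f);
 *	}
 */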

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;
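
/* Note: with this batch size each CPU may hold close to 130000 bytes of
 * local delta before folding it into the global count, so the cheap
 * frag_mem_limit() read below can be off by roughly batch * nr_cpus bytes;
 * sum_frag_mem_limit() folds the per-cpu counters for an exact value at a
 * higher cost.
 */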

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
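
/* How the accounting helpers are typically driven from a protocol's
 * receive path; a sketch under the assumption that eviction is triggered
 * by the caller (the exact call sites differ per protocol):
 *
 *	if (frag_mem_limit(nf) > nf->high_thresh)
 *		inet_frag_evictor(nf, f, false);
 *	...
 *	add_frag_mem_limit(q, skb->truesize);	(fragment queued)
 *	...
 *	sub_frag_mem_limit(q, skb->truesize);	(fragment freed)
 */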

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}
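
/* LRU lifecycle: a queue is put on the per-netns LRU list with
 * inet_frag_lru_add() when it is created, bumped to the tail with
 * inet_frag_lru_move() whenever a new fragment arrives, and taken off
 * with inet_frag_lru_del() when it is killed, so the evictor can always
 * reap from the head, i.e. the least recently touched queue.
 */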
#endif