/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* Quick Fair Queueing Plus
   ========================

   Sources:

   [1] Paolo Valente,
   "Reducing the Execution Time of Fair-Queueing Schedulers."
   http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

   Sources for QFQ:

   [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
   Packet Scheduling with Tight Bandwidth Distribution Guarantees."

   See also:
   http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
                                 ^.__grp->index = 0
                                 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1.
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
  F = S + len/w_i  and V = V + len/wsum
  instead of storing w_i we store the value
  inv_w = (1<<FRAC_BITS)/w_i
  so we can do F = S + len * inv_w * wsum.
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
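
/*
 * Worked example (illustrative only, using the constants defined
 * below): with FRAC_BITS = 30, a class of weight w_i = 4 stores
 *
 *      inv_w = ONE_FP / 4 = 1 << 28,
 *
 * so serving a len = 1000 byte packet advances its finish time by
 * len * inv_w = 1000 << 28, i.e. len/w_i = 250 in fixed point. With
 * MTU_SHIFT = 16, FRAC_BITS = 30 and MAX_INDEX = 24, the layout above
 * gives MIN_SLOT_SHIFT = 16 + 30 - 24 = 22.
 */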

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping.  We allow class weights that
 * are in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i
 * to the group with the smallest index that can support the L_i / r_i
 * configured for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)
#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states.  These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
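
/*
 * A group's state is thus read straight off the four bitmaps; e.g., a
 * sketch (not in the original source) of an eligible-ready test:
 *
 *      if (q->bitmaps[ER] & (1UL << grp->index))
 *              ... group grp has an eligible and ready aggregate ...
 */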

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
        struct Qdisc_class_common common;

        unsigned int refcnt;
        unsigned int filter_cnt;

        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct Qdisc *qdisc;
        struct list_head alist;         /* Link for active-classes list. */
        struct qfq_aggregate *agg;      /* Parent aggregate. */
        int deficit;                    /* DRR deficit counter. */
};

struct qfq_aggregate {
        struct hlist_node next; /* Link for the slot list. */
        u64 S, F;               /* flow timestamps (exact) */

        /* group we belong to. In principle we would need the index,
         * which is log_2(lmax/weight), but we never reference it
         * directly, only the group.
         */
        struct qfq_group *grp;

        /* these are copied from the flowset. */
        u32     class_weight; /* Weight of each class in this aggregate. */
        /* Max pkt size for the classes in this aggregate, DRR quantum. */
        int     lmax;

        u32     inv_w;      /* ONE_FP/(sum of weights of classes in aggr.). */
        u32     budgetmax;  /* Max budget for this aggregate. */
        u32     initial_budget, budget;     /* Initial and current budget. */

        int               num_classes;  /* Number of classes in this aggr. */
        struct list_head  active;       /* DRR queue of active classes. */

        struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
        u64 S, F;                       /* group timestamps (approx). */
        unsigned int slot_shift;        /* Slot shift. */
        unsigned int index;             /* Group index. */
        unsigned int front;             /* Index of the front slot. */
        unsigned long full_slots;       /* non-empty slots */

        /* Array of RR lists of active aggregates. */
        struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
        struct tcf_proto *filter_list;
        struct Qdisc_class_hash clhash;

        u64                     oldV, V;        /* Precise virtual times. */
        struct qfq_aggregate    *in_serv_agg;   /* Aggregate being served. */
        u32                     num_active_agg; /* Num. of active aggregates */
        u32                     wsum;           /* weight sum */

        unsigned long bitmaps[QFQ_MAX_STATE];       /* Group bitmaps. */
        struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
        u32 min_slot_shift;     /* Index of the group-0 bit in the bitmaps. */

        u32 max_agg_classes;            /* Max number of classes per aggr. */
        struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated:
 * enqueue: the aggregate switches from idle to active and must be
 *          scheduled for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *          must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct qfq_class, common);
}

static void qfq_purge_queue(struct qfq_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
        u64 slot_size = (u64)maxlen * inv_w;
        unsigned long size_map;
        int index = 0;

        size_map = slot_size >> min_slot_shift;
        if (!size_map)
                goto out;

        index = __fls(size_map) + 1;    /* basically a log_2 */
        index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

        if (index < 0)
                index = 0;
out:
        pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
                 (unsigned long) ONE_FP/inv_w, maxlen, index);

        return index;
}
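
/*
 * Worked example of qfq_calc_index (illustrative numbers only): with
 * weight 1 (inv_w = ONE_FP = 1<<30), maxlen = 2048 and
 * min_slot_shift = 22, slot_size = 1<<41 and size_map = 1<<19, so
 * __fls() + 1 yields 20; slot_size is an exact power of two, so the
 * correction subtracts one, giving index 19 = log_2(2^41 / 2^22).
 */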

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
                             enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                         u32 lmax, u32 weight)
{
        INIT_LIST_HEAD(&agg->active);
        hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

        agg->lmax = lmax;
        agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
                                          u32 lmax, u32 weight)
{
        struct qfq_aggregate *agg;
        struct hlist_node *n;

        hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
                if (agg->lmax == lmax && agg->class_weight == weight)
                        return agg;

        return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                           int new_num_classes)
{
        u32 new_agg_weight;

        if (new_num_classes == q->max_agg_classes)
                hlist_del_init(&agg->nonfull_next);

        if (agg->num_classes > new_num_classes &&
            new_num_classes == q->max_agg_classes - 1) /* agg no more full */
                hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

        agg->budgetmax = new_num_classes * agg->lmax;
        new_agg_weight = agg->class_weight * new_num_classes;
        agg->inv_w = ONE_FP/new_agg_weight;

        if (agg->grp == NULL) {
                int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
                                       q->min_slot_shift);
                agg->grp = &q->groups[i];
        }

        q->wsum +=
                (int) agg->class_weight * (new_num_classes - agg->num_classes);

        agg->num_classes = new_num_classes;
}

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
                           struct qfq_aggregate *agg,
                           struct qfq_class *cl)
{
        cl->agg = agg;

        qfq_update_agg(q, agg, agg->num_classes+1);
        if (cl->qdisc->q.qlen > 0) { /* adding an active class */
                list_add_tail(&cl->alist, &agg->active);
                if (list_first_entry(&agg->active, struct qfq_class, alist) ==
                    cl && q->in_serv_agg != agg) /* agg was inactive */
                        qfq_activate_agg(q, agg, enqueue); /* schedule agg */
        }
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
        if (!hlist_unhashed(&agg->nonfull_next))
                hlist_del_init(&agg->nonfull_next);
        if (q->in_serv_agg == agg)
                q->in_serv_agg = qfq_choose_next_agg(q);
        kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
        struct qfq_aggregate *agg = cl->agg;

        list_del(&cl->alist); /* remove from RR queue of the aggregate */
        if (list_empty(&agg->active)) /* agg is now inactive */
                qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
        struct qfq_aggregate *agg = cl->agg;

        cl->agg = NULL;
        if (agg->num_classes == 1) { /* agg being emptied, destroy it */
                qfq_destroy_agg(q, agg);
                return;
        }
        qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
        if (cl->qdisc->q.qlen > 0) /* class is active */
                qfq_deactivate_class(q, cl);

        qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
                          u32 lmax)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);

        if (new_agg == NULL) { /* create new aggregate */
                new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
                if (new_agg == NULL)
                        return -ENOBUFS;
                qfq_init_agg(q, new_agg, lmax, weight);
        }
        qfq_deact_rm_from_agg(q, cl);
        qfq_add_to_agg(q, new_agg, cl);

        return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)*arg;
        bool existing = false;
        struct nlattr *tb[TCA_QFQ_MAX + 1];
        struct qfq_aggregate *new_agg = NULL;
        u32 weight, lmax, inv_w;
        int err;
        int delta_w;

        if (tca[TCA_OPTIONS] == NULL) {
                pr_notice("qfq: no options\n");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
        if (err < 0)
                return err;

        if (tb[TCA_QFQ_WEIGHT]) {
                weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
                if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
                        pr_notice("qfq: invalid weight %u\n", weight);
                        return -EINVAL;
                }
        } else
                weight = 1;

        if (tb[TCA_QFQ_LMAX]) {
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
                if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
                        pr_notice("qfq: invalid max length %u\n", lmax);
                        return -EINVAL;
                }
        } else
                lmax = psched_mtu(qdisc_dev(sch));

        inv_w = ONE_FP / weight;
        weight = ONE_FP / inv_w;

        if (cl != NULL &&
            lmax == cl->agg->lmax &&
            weight == cl->agg->class_weight)
                return 0; /* nothing to change */

        delta_w = weight - (cl ? cl->agg->class_weight : 0);

        if (q->wsum + delta_w > QFQ_MAX_WSUM) {
                pr_notice("qfq: total weight out of range (%d + %u)\n",
                          delta_w, q->wsum);
                return -EINVAL;
        }

        if (cl != NULL) { /* modify existing class */
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                                    qdisc_root_sleeping_lock(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
                }
                existing = true;
                goto set_change_agg;
        }

        /* create and init new class */
        cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        cl->refcnt = 1;
        cl->common.classid = classid;
        cl->deficit = lmax;

        cl->qdisc = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;

        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, &cl->rate_est,
                                        qdisc_root_sleeping_lock(sch),
                                        tca[TCA_RATE]);
                if (err)
                        goto destroy_class;
        }

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

set_change_agg:
        sch_tree_lock(sch);
        new_agg = qfq_find_agg(q, lmax, weight);
        if (new_agg == NULL) { /* create new aggregate */
                sch_tree_unlock(sch);
                new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
                if (new_agg == NULL) {
                        err = -ENOBUFS;
                        gen_kill_estimator(&cl->bstats, &cl->rate_est);
                        goto destroy_class;
                }
                sch_tree_lock(sch);
                qfq_init_agg(q, new_agg, lmax, weight);
        }
        if (existing)
                qfq_deact_rm_from_agg(q, cl);
        qfq_add_to_agg(q, new_agg, cl);
        sch_tree_unlock(sch);

        *arg = (unsigned long)cl;
        return 0;

destroy_class:
        qdisc_destroy(cl->qdisc);
        kfree(cl);
        return err;
}
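
/*
 * The two netlink attributes parsed above map to the tc(8) class
 * options; a hypothetical invocation (assumed iproute2 syntax, not
 * part of this file) would look something like:
 *
 *      tc class add dev eth0 parent 1: classid 1:42 qfq weight 8 maxpkt 1500
 *
 * where weight fills TCA_QFQ_WEIGHT and maxpkt fills TCA_QFQ_LMAX.
 */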

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
        struct qfq_sched *q = qdisc_priv(sch);

        qfq_rm_from_agg(q, cl);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_destroy(cl->qdisc);
        kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (cl->filter_cnt > 0)
                return -EBUSY;

        sch_tree_lock(sch);

        qfq_purge_queue(cl);
        qdisc_class_hash_remove(&q->clhash, &cl->common);

        BUG_ON(--cl->refcnt == 0);
        /*
         * This shouldn't happen: we "hold" one cops->get() when called
         * from tc_ctl_tclass; the destroy method is done from cops->put().
         */

        sch_tree_unlock(sch);
        return 0;
}

static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
        struct qfq_class *cl = qfq_find_class(sch, classid);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (--cl->refcnt == 0)
                qfq_destroy_class(sch, cl);
}

static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
        struct qfq_sched *q = qdisc_priv(sch);

        if (cl)
                return NULL;

        return &q->filter_list;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        struct qfq_class *cl = qfq_find_class(sch, classid);

        if (cl != NULL)
                cl->filter_cnt++;

        return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
                           struct Qdisc *new, struct Qdisc **old)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        qfq_purge_queue(cl);
        *old = cl->qdisc;
        cl->qdisc = new;
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct qfq_class *cl = (struct qfq_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info   = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
            nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
{
        struct qfq_class *cl = (struct qfq_class *)arg;
        struct tc_qfq_stats xstats;

        memset(&xstats, 0, sizeof(xstats));
        cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;

        xstats.weight = cl->agg->class_weight;
        xstats.lmax = cl->agg->lmax;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct hlist_node *n;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
                pr_debug("qfq_classify: found %d\n", skb->priority);
                cl = qfq_find_class(sch, skb->priority);
                if (cl != NULL)
                        return cl;
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (struct qfq_class *)res.class;
                if (cl == NULL)
                        cl = qfq_find_class(sch, res.classid);
                return cl;
        }

        return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
        return (s64)(a - b) > 0;
}

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
        return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
                                        unsigned long bitmap)
{
        int index = __ffs(bitmap);
        return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
        return bitmap & ~((1UL << from) - 1);
}
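
/*
 * Illustrative values for the helpers above (not in the original
 * source): with shift = 3, qfq_round_down(0x2d, 3) clears the low
 * three bits and returns 0x28; mask_from(0b101101, 2) drops bit 0 and
 * returns 0b101100, keeping only bits at position 2 and above. The
 * signed subtraction in qfq_gt() makes qfq_gt(1, ~0ULL) true, i.e. a
 * timestamp just past the 64-bit wraparound still compares as "later".
 */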

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3.
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
        /* if S > V we are not eligible */
        unsigned int state = qfq_gt(grp->S, q->V);
        unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
        struct qfq_group *next;

        if (mask) {
                next = qfq_ffs(q, mask);
                if (qfq_gt(grp->F, next->F))
                        state |= EB;
        }

        return state;
}
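
/*
 * For example (illustrative only): if grp->S <= q->V, the group is
 * eligible and state starts at 0 = ER; if the first group in ER at
 * index >= grp->index finishes earlier (next->F < grp->F), the EB bit
 * is OR-ed in and the state becomes 2 = EB. An ineligible group
 * (state 1 = IR) blocked the same way ends up with 1 | 2 = 3 = IB.
 */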

/*
 * In principle
 *      q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *      q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
                                   int src, int dst)
{
        q->bitmaps[dst] |= q->bitmaps[src] & mask;
        q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
        unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
        struct qfq_group *next;

        if (mask) {
                next = qfq_ffs(q, mask);
                if (!qfq_gt(next->F, old_F))
                        return;
        }

        mask = (1UL << index) - 1;
        qfq_move_groups(q, mask, EB, ER);
        qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
        old_V ^= q->V;
        old_V >>= q->min_slot_shift;
        if (old_V) {
                ...
        }
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
        unsigned long vslot = q->V >> q->min_slot_shift;
        unsigned long old_vslot = q->oldV >> q->min_slot_shift;

        if (vslot != old_vslot) {
                unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
                qfq_move_groups(q, mask, IR, ER);
                qfq_move_groups(q, mask, IB, EB);
        }
}
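
/*
 * Worked example (illustrative only): if the old virtual-time slot is
 * 5 (0b101) and the new one is 6 (0b110), then vslot ^ old_vslot =
 * 0b011, fls() = 2 and mask = 0b11, so exactly groups 0 and 1 move
 * from the ineligible bitmaps (IR, IB) to the eligible ones (ER, EB).
 */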

/*
 * The index of the slot in which the aggregate is to be inserted must
 * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
 * because the start time of the group may be moved backward by one
 * slot after the aggregate has been inserted, and this would cause
 * non-empty slots to be right-shifted by one position.
 *
 * If the weight and lmax (max_pkt_size) of the classes do not change,
 * then QFQ+ does meet the above constraint according to the current
 * values of its parameters. In fact, if the weight and lmax of the
 * classes do not change, then, from the theory, QFQ+ guarantees that
 * the slot index is never higher than
 * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
 *
 * When the weight of a class is increased or the lmax of the class is
 * decreased, a new aggregate with smaller slot size than the original
 * parent aggregate of the class may happen to be activated. The
 * activation of this aggregate should be properly delayed to when the
 * service of the class has finished in the ideal system tracked by
 * QFQ+. If the activation of the aggregate is not delayed to this
 * reference time instant, then this aggregate may be unjustly served
 * before other aggregates waiting for service. This may cause the
 * above bound to the slot index to be violated for some of these
 * unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the following inaccurate but simple solution is used:
 * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
 * timestamps of the aggregate are shifted backward so as to let the
 * slot index become equal to QFQ_MAX_SLOTS-2.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
                            u64 roundedS)
{
        u64 slot = (roundedS - grp->S) >> grp->slot_shift;
        unsigned int i; /* slot index in the bucket list */

        if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
                u64 deltaS = roundedS - grp->S -
                             ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
                agg->S -= deltaS;
                agg->F -= deltaS;
                slot = QFQ_MAX_SLOTS - 2;
        }

        i = (grp->front + slot) % QFQ_MAX_SLOTS;

        hlist_add_head(&agg->next, &grp->slots[i]);
        __set_bit(slot, &grp->full_slots);
}

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
        return hlist_entry(grp->slots[grp->front].first,
                           struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
        struct qfq_aggregate *agg = qfq_slot_head(grp);

        BUG_ON(!agg);
        hlist_del(&agg->next);
        if (hlist_empty(&grp->slots[grp->front]))
                __clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
        unsigned int i;

        pr_debug("qfq slot_scan: grp %u full %#lx\n",
                 grp->index, grp->full_slots);

        if (grp->full_slots == 0)
                return NULL;

        i = __ffs(grp->full_slots);  /* zero based */
        if (i > 0) {
                grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
                grp->full_slots >>= i;
        }

        return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32.
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
        unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

        grp->full_slots <<= i;
        grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
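
/*
 * Worked example (illustrative only): if the group's start time moves
 * back by two slots (i = 2), full_slots = 0b101 becomes 0b10100 and
 * front moves back by two positions modulo QFQ_MAX_SLOTS, so the
 * buckets themselves never have to be copied.
 */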

static void qfq_update_eligible(struct qfq_sched *q)
{
        struct qfq_group *grp;
        unsigned long ineligible;

        ineligible = q->bitmaps[IR] | q->bitmaps[IB];
        if (ineligible) {
                if (!q->bitmaps[ER]) {
                        grp = qfq_ffs(q, ineligible);
                        if (qfq_gt(grp->S, q->V))
                                q->V = grp->S;
                }
                qfq_make_eligible(q);
        }
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
                        struct qfq_class *cl, unsigned int len)
{
        qdisc_dequeue_peeked(cl->qdisc);

        cl->deficit -= (int) len;

        if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
                list_del(&cl->alist);
        else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
                cl->deficit += agg->lmax;
                list_move_tail(&cl->alist, &agg->active);
        }
}

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
                                           struct qfq_class **cl,
                                           unsigned int *len)
{
        struct sk_buff *skb;

        *cl = list_first_entry(&agg->active, struct qfq_class, alist);
        skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
        if (skb == NULL)
                WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
        else
                *len = qdisc_pkt_len(skb);

        return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
        /* compute the service received by the aggregate */
        u32 service_received = agg->initial_budget - agg->budget;

        agg->F = agg->S + (u64)service_received * agg->inv_w;
}

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
        struct qfq_class *cl;
        struct sk_buff *skb = NULL;
        /* next-packet len, 0 means no more active classes in in-service agg */
        unsigned int len = 0;

        if (in_serv_agg == NULL)
                return NULL;

        if (!list_empty(&in_serv_agg->active))
                skb = qfq_peek_skb(in_serv_agg, &cl, &len);

        /*
         * If there are no active classes in the in-service aggregate,
         * or if the aggregate has not enough budget to serve its next
         * class, then choose the next aggregate to serve.
         */
        if (len == 0 || in_serv_agg->budget < len) {
                charge_actual_service(in_serv_agg);

                /* recharge the budget of the aggregate */
                in_serv_agg->initial_budget = in_serv_agg->budget =
                        in_serv_agg->budgetmax;

                if (!list_empty(&in_serv_agg->active))
                        /*
                         * Still active: reschedule for
                         * service. Possible optimization: if no other
                         * aggregate is active, then there is no point
                         * in rescheduling this aggregate, and we can
                         * just keep it as the in-service one. This
                         * should be however a corner case, and to
                         * handle it, we would need to maintain an
                         * extra num_active_aggs field.
                         */
                        qfq_activate_agg(q, in_serv_agg, requeue);
                else if (sch->q.qlen == 0) { /* no aggregate to serve */
                        q->in_serv_agg = NULL;
                        return NULL;
                }

                /*
                 * If we get here, there are other aggregates queued:
                 * choose the new aggregate to serve.
                 */
                in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
                skb = qfq_peek_skb(in_serv_agg, &cl, &len);
        }
        if (!skb)
                return NULL;

        sch->q.qlen--;
        qdisc_bstats_update(sch, skb);

        agg_dequeue(in_serv_agg, cl, len);
        in_serv_agg->budget -= len;
        q->V += (u64)len * IWSUM;
        pr_debug("qfq dequeue: len %u F %lld now %lld\n",
                 len, (unsigned long long) in_serv_agg->F,
                 (unsigned long long) q->V);

        return skb;
}
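
/*
 * On the V update above (illustrative numbers only): with
 * QFQ_MAX_WSUM = 64 * 1024 = 2^16, IWSUM = ONE_FP / 2^16 = 2^14, so a
 * len = 1024 byte dequeue advances V by 1024 * 2^14 = 2^24, which is
 * exactly len/wsum = 1/64 expressed with FRAC_BITS = 30 fractional
 * bits.
 */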

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
        struct qfq_group *grp;
        struct qfq_aggregate *agg, *new_front_agg;
        u64 old_F;

        qfq_update_eligible(q);
        q->oldV = q->V;

        if (!q->bitmaps[ER])
                return NULL;

        grp = qfq_ffs(q, q->bitmaps[ER]);
        old_F = grp->F;

        agg = qfq_slot_head(grp);

        /* agg starts to be served, remove it from schedule */
        qfq_front_slot_remove(grp);

        new_front_agg = qfq_slot_scan(grp);

        if (new_front_agg == NULL) /* group is now inactive, remove from ER */
                __clear_bit(grp->index, &q->bitmaps[ER]);
        else {
                u64 roundedS = qfq_round_down(new_front_agg->S,
                                              grp->slot_shift);
                unsigned int s;

                if (grp->S == roundedS)
                        return agg;
                grp->S = roundedS;
                grp->F = roundedS + (2ULL << grp->slot_shift);
                __clear_bit(grp->index, &q->bitmaps[ER]);
                s = qfq_calc_state(q, grp);
                __set_bit(grp->index, &q->bitmaps[s]);
        }

        qfq_unblock_groups(q, grp->index, old_F);

        return agg;
}

/*
 * Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat{F} are multiples of \sigma_i
 * no greater than V + \sigma_i. Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}
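
/*
 * Worked example for the staleness test (illustrative numbers): with
 * slot_shift = 3 the slot size is 2^3 = 8.  If V = 21, then
 * limit = round_down(21, 3) + 8 = 24.  An aggregate with F = 23 is not
 * stale (F > V and round_down(23, 3) = 16 <= 24), so it restarts at
 * S = F = 23.  An aggregate with F = 70 has round_down(70, 3) = 64 > 24,
 * i.e. a stale (wrapped) timestamp, and is restarted at S = V or at the
 * finish time of the first blocking ER group.
 */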

/*
 * Update the timestamps of agg before scheduling/rescheduling it for
 * service.  In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		  struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
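
/*
 * Illustrative arithmetic (assumed values): inv_w is the inverse weight
 * in fixed point, inv_w = ONE_FP / w, so, ignoring rounding, the finish
 * time advances by budgetmax / w scaled by 2^FRAC_BITS.  E.g. an
 * aggregate with weight w = 4 and budgetmax = 6000 bytes gets
 * F = S + 6000 * (ONE_FP / 4) = S + (1500 << FRAC_BITS).
 */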

static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);

static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u\n",
			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
				     qdisc_pkt_len(skb));
		if (err) /* the skb was not enqueued: drop and free it */
			return qdisc_drop(skb, sch);
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (cl->qdisc->q.qlen != 1) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < qdisc_pkt_len(skb))
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
		return err; /* aggregate was not empty, nothing else to do */

	/* recharge budget */
	agg->initial_budget = agg->budget = agg->budgetmax;

	qfq_update_agg_ts(q, agg, enqueue);
	if (q->in_serv_agg == NULL)
		q->in_serv_agg = agg;
	else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);

	return err;
}
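
/*
 * Illustrative note on the DRR corner case handled above (example
 * numbers): if the freshly enqueued skb becomes the new head of a class
 * that already sits at the front of agg->active, but the class deficit
 * is too small for it, e.g. deficit = 500 against a 1000-byte head
 * packet, the class is moved to the tail so that the round-robin order
 * stays coherent with the deficit accounting performed at dequeue time.
 */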
/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	u64 roundedS;
	int s;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);

	/*
	 * Insert agg in the correct bucket.
	 * If agg->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, agg->S))
			goto skip_update;

		/* create a slot for this agg->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) agg->S,
		 (unsigned long long) agg->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, agg, roundedS);
}
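
/*
 * Illustrative note on the V adjustment above (example numbers): it
 * matters when the scheduler went idle.  If no group is eligible and
 * ready (ER is empty) and the new aggregate starts in the future, e.g.
 * V = 100 but roundedS = 160, V jumps forward to 160 so the aggregate
 * becomes eligible immediately instead of waiting for V to catch up.
 */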


/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	qfq_update_agg_ts(q, agg, reason);
	qfq_schedule_agg(q, agg);
}

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}
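
/*
 * Worked example of the bucket arithmetic (illustrative numbers): with
 * slot_shift = 3 (slot size 8), grp->S = 160 and agg->S = 187, we get
 * roundedS = 184 and offset = (184 - 160) >> 3 = 3, so the aggregate
 * lives in bucket (grp->front + 3) % QFQ_MAX_SLOTS, and bit 3 of
 * full_slots tracks whether that bucket is occupied.
 */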

/*
 * Called to forcibly deschedule an aggregate.  If the aggregate is
 * not in the front bucket, or if the front bucket holds other
 * aggregates too, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}

	qfq_update_eligible(q);
}
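
/*
 * Worked example of the mask computation above (illustrative numbers):
 * if the ER groups with index below grp->index = 5 are {1, 3}, then
 * mask = 0b01010, __fls(mask) = 3, and ~((1UL << 3) - 1) selects every
 * index >= 3; qfq_move_groups() then promotes the blocked groups in
 * that range (EB -> ER, IB -> IR).  If no lower ER group exists, all
 * groups are unblocked (mask = ~0UL).
 */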

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		qfq_deactivate_class(q, cl);
}

static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
				       struct hlist_head *slot)
{
	struct qfq_aggregate *agg;
	struct hlist_node *n;
	struct qfq_class *cl;
	unsigned int len;

	hlist_for_each_entry(agg, n, slot, next) {
		list_for_each_entry(cl, &agg->active, alist) {

			if (!cl->qdisc->ops->drop)
				continue;

			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				if (cl->qdisc->q.qlen == 0)
					qfq_deactivate_class(q, cl);

				return len;
			}
		}
	}
	return 0;
}

static unsigned int qfq_drop(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	unsigned int i, j, len;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			len = qfq_drop_from_slot(q, &grp->slots[j]);
			if (len > 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}

	return 0;
}
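
/*
 * Descriptive note: qfq_drop() scans groups in index order and, within
 * each group, the slot buckets in array order, dropping from the first
 * active class whose leaf qdisc implements ->drop.  It is a simple
 * positional scan; it makes no attempt to pick the heaviest class.
 */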

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;
	else
		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1<<max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}
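
/*
 * Worked example (with values assumed from the definitions earlier in
 * this file, e.g. QFQ_MAX_AGG_CLASSES = 8, QFQ_MTU_SHIFT = 16,
 * FRAC_BITS = 30, QFQ_MAX_INDEX = 24): a device with tx_queue_len = 1000
 * is capped at max_classes = 8, so max_cl_shift = __fls(8) = 3 and each
 * aggregate holds at most 2^3 = 8 classes.  Then maxbudg_shift = 16 + 3
 * = 19 and min_slot_shift = 30 + 19 - 24 = 25, i.e. group i uses slots
 * of 2^(25+i) units of virtual time.
 */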

static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.get		= qfq_get_class,
	.put		= qfq_put_class,
	.tcf_chain	= qfq_tcf_chain,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= qfq_drop,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");