// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
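
/* MULTIQ: a simple round-robin scheduler for multiqueue devices. Each
 * hardware transmit queue of the device is backed by one band (child
 * qdisc), and dequeue skips bands whose TX queue is currently stopped.
 *
 * Illustrative setup from user space (a sketch assuming the iproute2
 * 'tc' tool; device name, address and queue index are placeholders):
 *
 *   tc qdisc add dev eth0 root handle 1: multiq
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *       match ip dst 192.168.0.3 action skbedit queue_mapping 3
 */
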
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct Qdisc **queues;
};

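/* Select the child qdisc for an skb: run any attached tc filters first;
 * if the verdict neither steals nor drops the packet, fall back to the
 * skb's queue mapping. Out-of-range mappings are directed to band 0.
 */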
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		fallthrough;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

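/* Enqueue into the band chosen by multiq_classify(); drops are accounted
 * against this qdisc when the child refuses the packet.
 */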
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

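/* Dequeue round-robins across the bands, resuming after the band served
 * on the previous call (q->curband persists), and skips any band whose
 * underlying hardware TX queue is stopped.
 */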
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}

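/* Peek mirrors the dequeue loop but advances only a local copy of
 * curband, so peeking does not disturb the round-robin state.
 */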
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;
}

static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	for (band = 0; band < q->bands; band++)
		qdisc_put(q->queues[band]);

	kfree(q->queues);
}

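/* (Re)configure the active band count. The count always tracks the
 * device's real_num_tx_queues: bands beyond it are torn down back to
 * noop_qdisc, and newly active bands get a default pfifo child.
 */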
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			q->queues[i] = &noop_qdisc;
			qdisc_tree_flush_backlog(child);
			qdisc_put(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1), extack);
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc) {
					qdisc_tree_flush_backlog(old);
					qdisc_put(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

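/* Init allocates one child slot per device TX queue (num_tx_queues is
 * the upper bound) and then reuses multiq_tune() to create the default
 * children for the currently active queues.
 */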
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

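/* Class interface: classes are addressed by band number + 1, i.e. class
 * minor numbers 1..bands map to q->queues[0..bands-1].
 */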
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_find(sch, classid);
}

static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.find		=	multiq_find,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_unbind,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");