// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c       packet mirroring and redirect actions
 *
 * Authors:     Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

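/* mirred can redirect a packet to a device whose qdisc or clsact hook
 * runs mirred again. Cap the nesting depth with a per-CPU counter so
 * that a misconfigured redirect loop is dropped instead of recursing
 * indefinitely.
 */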
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

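/* TCA_*_REDIR actions steal the packet for the target device;
 * TCA_*_MIRROR actions send the target a clone and let the original
 * packet continue on its way.
 */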
static bool tcf_mirred_is_act_redirect(int action)
{
        return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
        switch (action) {
        case TCA_EGRESS_REDIR:
        case TCA_EGRESS_MIRROR:
                return false;
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                return true;
        default:
                BUG();
        }
}

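/* The original skb can be handed to the target device without a clone
 * only when the filter verdict already removes it from the current
 * datapath: dropped, stolen, queued or trapped.
 */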
static bool tcf_mirred_can_reinsert(int action)
{
        switch (action) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                return true;
        }
        return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
        return rcu_dereference_protected(m->tcfm_dev,
                                         lockdep_is_held(&m->tcf_lock));
}

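/* ->cleanup() callback: unlink the action from the notifier list and
 * drop the reference on the target device, if any.
 */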
static void tcf_mirred_release(struct tc_action *a)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        spin_lock(&mirred_list_lock);
        list_del(&m->tcfm_list);
        spin_unlock(&mirred_list_lock);

        /* last reference to action, no need to lock */
        dev = rcu_dereference_protected(m->tcfm_dev, 1);
        if (dev)
                dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
        [TCA_MIRRED_PARMS]      = { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

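/* ->init() callback: parse the TCA_MIRRED_* netlink attributes and
 * either create a new action instance or update an existing one.
 */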
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
                           struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        struct net_device *dev;
        bool exists = false;
        int ret, err;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
                return -EINVAL;
        }
        ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
                                          mirred_policy, extack);
        if (ret < 0)
                return ret;
        if (!tb[TCA_MIRRED_PARMS]) {
                NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
                return -EINVAL;
        }
        parm = nla_data(tb[TCA_MIRRED_PARMS]);

        err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                break;
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, parm->index);
                NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
                return -EINVAL;
        }

        if (!exists) {
                if (!parm->ifindex) {
                        tcf_idr_cleanup(tn, parm->index);
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_mirred_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!ovr) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }

        m = to_mirred(*a);
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&m->tcfm_list);

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        spin_lock_bh(&m->tcf_lock);

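        /* Swap the new target device in under tcf_lock and release the
         * reference to the old one, if any.
         */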
        if (parm->ifindex) {
                dev = dev_get_by_index(net, parm->ifindex);
                if (!dev) {
                        spin_unlock_bh(&m->tcf_lock);
                        err = -ENODEV;
                        goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
                rcu_swap_protected(m->tcfm_dev, dev,
                                   lockdep_is_held(&m->tcf_lock));
                if (dev)
                        dev_put(dev);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
                spin_unlock(&mirred_list_lock);

                tcf_idr_insert(tn, *a);
        }

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

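/* ->act() callback: the per-packet fast path. It runs in RCU/BH
 * context, so the action parameters are read with READ_ONCE() and the
 * target device with rcu_dereference_bh() rather than under tcf_lock.
 */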
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_mirred *m = to_mirred(a);
        struct sk_buff *skb2 = skb;
        bool m_mac_header_xmit;
        struct net_device *dev;
        unsigned int rec_level;
        int retval, err = 0;
        bool use_reinsert;
        bool want_ingress;
        bool is_redirect;
        int m_eaction;
        int mac_len;

        rec_level = __this_cpu_inc_return(mirred_rec_level);
        if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
                net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
                                     netdev_name(skb->dev));
                __this_cpu_dec(mirred_rec_level);
                return TC_ACT_SHOT;
        }

        tcf_lastuse_update(&m->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);

        m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
        m_eaction = READ_ONCE(m->tcfm_eaction);
        retval = READ_ONCE(m->tcf_action);
        dev = rcu_dereference_bh(m->tcfm_dev);
        if (unlikely(!dev)) {
                pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }

        if (unlikely(!(dev->flags & IFF_UP))) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
        }

        /* the clone could be avoided only when the caller is ingress or
         * clsact; since the clsact caller can't easily be detected, skip
         * the clone only for ingress - that still covers the TC S/W
         * datapath.
         */
        is_redirect = tcf_mirred_is_act_redirect(m_eaction);
        use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
                       tcf_mirred_can_reinsert(retval);
        if (!use_reinsert) {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (!skb2)
                        goto out;
        }

        /* If the action's target direction differs from the filter's
         * direction, and the device expects a mac header on xmit, then
         * a mac push/pull is needed.
         */
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
        if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
                if (!skb_at_tc_ingress(skb)) {
                        /* caught at egress, act ingress: pull mac */
                        mac_len = skb_network_header(skb) - skb_mac_header(skb);
                        skb_pull_rcsum(skb2, mac_len);
                } else {
                        /* caught at ingress, act egress: push mac */
                        skb_push_rcsum(skb2, skb->mac_len);
                }
        }

        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;

        /* mirror is always swallowed */
        if (is_redirect) {
                skb2->tc_redirected = 1;
                skb2->tc_from_ingress = skb2->tc_at_ingress;
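                /* On ingress skb->tstamp holds the RX timestamp; clear
                 * it so an EDT-aware qdisc (e.g. fq) on the target
                 * device does not mistake it for a transmit deadline.
                 */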
                if (skb2->tc_from_ingress)
                        skb2->tstamp = 0;
                /* let the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
                        res->qstats = this_cpu_ptr(m->common.cpu_qstats);
                        skb_tc_reinsert(skb, res);
                        __this_cpu_dec(mirred_rec_level);
                        return TC_ACT_CONSUMED;
                }
        }

        if (!want_ingress)
                err = dev_queue_xmit(skb2);
        else
                err = netif_receive_skb(skb2);

        if (err) {
out:
                qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
                if (tcf_mirred_is_act_redirect(m_eaction))
                        retval = TC_ACT_SHOT;
        }
        __this_cpu_dec(mirred_rec_level);

        return retval;
}

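/* ->stats_update() callback: fold counters reported by hardware
 * offload drivers into the software statistics.
 */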
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
                             u64 lastuse, bool hw)
{
        struct tcf_mirred *m = to_mirred(a);
        struct tcf_t *tm = &m->tcf_tm;

        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
        if (hw)
                _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
                                   bytes, packets);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

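/* ->dump() callback: serialize the action parameters back to netlink;
 * on failure, trim any partially written attributes.
 */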
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                           int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = to_mirred(a);
        struct tc_mirred opt = {
                .index   = m->tcf_index,
                .refcnt  = refcount_read(&m->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
        };
        struct net_device *dev;
        struct tcf_t t;

        spin_lock_bh(&m->tcf_lock);
        opt.action = m->tcf_action;
        opt.eaction = m->tcfm_eaction;
        dev = tcf_mirred_dev_dereference(m);
        if (dev)
                opt.ifindex = dev->ifindex;

        if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &m->tcf_tm);
        if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&m->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&m->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
                             struct netlink_callback *cb, int type,
                             const struct tc_action_ops *ops,
                             struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tcf_idr_search(tn, a, index);
}

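/* Netdevice notifier: when a target device is unregistered, drop the
 * reference and clear the pointer so the datapath reports "target
 * device is gone" instead of touching a stale device.
 */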
static int mirred_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;

        ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER) {
                spin_lock(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        spin_lock_bh(&m->tcf_lock);
                        if (tcf_mirred_dev_dereference(m) == dev) {
                                dev_put(dev);
                                /* Note: no RCU grace period is necessary,
                                 * since the net_device is already RCU
                                 * protected.
                                 */
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                        spin_unlock_bh(&m->tcf_lock);
                }
                spin_unlock(&mirred_list_lock);
        }

        return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
        .notifier_call = mirred_device_event,
};

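/* ->get_dev() callback: take a reference on the current target device
 * so a caller (e.g. the hardware offload code) can use it safely
 * outside of tcf_lock.
 */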
static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(m->tcfm_dev);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();

        return dev;
}

static void tcf_mirred_put_dev(struct net_device *dev)
{
        dev_put(dev);
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .id             =       TCA_ID_MIRRED,
        .owner          =       THIS_MODULE,
        .act            =       tcf_mirred_act,
        .stats_update   =       tcf_stats_update,
        .dump           =       tcf_mirred_dump,
        .cleanup        =       tcf_mirred_release,
        .init           =       tcf_mirred_init,
        .walk           =       tcf_mirred_walker,
        .lookup         =       tcf_mirred_search,
        .get_fill_size  =       tcf_mirred_get_fill_size,
        .size           =       sizeof(struct tcf_mirred),
        .get_dev        =       tcf_mirred_get_dev,
        .put_dev        =       tcf_mirred_put_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tc_action_net_init(tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
        .init = mirred_init_net,
        .exit_batch = mirred_exit_net,
        .id   = &mirred_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
        int err = register_netdevice_notifier(&mirred_device_notifier);

        if (err)
                return err;

        pr_info("Mirror/redirect action on\n");
        return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
}

static void __exit mirred_cleanup_module(void)
{
        tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
        unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);