2 * Copyright Gavin Shan, IBM Corporation 2016.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
20 #include <net/addrconf.h>
22 #include <net/if_inet6.h>
/* Global registry of NC-SI devices, guarded by ncsi_dev_lock.
 * ncsi_register_dev()/ncsi_unregister_dev() add/remove entries (RCU list
 * ops); ncsi_find_dev() walks it via NCSI_FOR_EACH_DEV().
 */
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
/* Return the per-entry data size, in bytes, of filter table @table.
 * NOTE(review): this excerpt elides the function's braces and return
 * paths; only the visible lines are annotated.
 */
30 static inline int ncsi_filter_size(int table)
/* One entry size per filter table */
32 int sizes[] = { 2, 6, 6, 6 };
/* Compile-time check: sizes[] must cover every defined filter table */
34 BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
/* Reject table indices outside [NCSI_FILTER_BASE, NCSI_FILTER_MAX) */
35 if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
/* Return a pointer to entry @index of filter table @table on channel
 * @nc.  The entry lives at ncf->data + index * per-entry-size.
 * (excerpt: NULL/validity checks between these lines are elided)
 */
41 u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
43 struct ncsi_channel_filter *ncf;
46 ncf = nc->filters[table];
50 size = ncsi_filter_size(table);
54 return ncf->data + size * index;
57 /* Find the first active filter in a filter table that matches the given
58  * data parameter. If data is NULL, this returns the first active filter.
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
62 struct ncsi_channel_filter *ncf;
67 ncf = nc->filters[table];
71 size = ncsi_filter_size(table);
/* Walk the in-use bitmap under the channel lock */
75 spin_lock_irqsave(&nc->lock, flags);
76 bitmap = (void *)&ncf->bitmap;
78 while ((index = find_next_bit(bitmap, ncf->total, index + 1))
/* Match on entry payload, or accept any active entry when @data is NULL */
80 if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 spin_unlock_irqrestore(&nc->lock, flags);
/* No match found; drop the lock before returning (return value elided here) */
85 spin_unlock_irqrestore(&nc->lock, flags);
/* Install @data into a free slot of filter table @table on channel @nc.
 * An already-present entry is detected via ncsi_find_filter() first.
 * (excerpt: early-return/error lines between the visible ones are elided)
 */
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
92 struct ncsi_channel_filter *ncf;
97 size = ncsi_filter_size(table);
/* Reuse an existing matching entry when present */
101 index = ncsi_find_filter(nc, table, data);
105 ncf = nc->filters[table];
109 spin_lock_irqsave(&nc->lock, flags);
110 bitmap = (void *)&ncf->bitmap;
/* Claim a free slot; loop (do/while, opening elided) guards against a
 * racing claimer via test_and_set_bit()
 */
112 index = find_next_zero_bit(bitmap, ncf->total, 0);
113 if (index >= ncf->total) {
114 spin_unlock_irqrestore(&nc->lock, flags);
117 } while (test_and_set_bit(index, bitmap));
/* Copy the payload into the claimed slot */
119 memcpy(ncf->data + size * index, data, size);
120 spin_unlock_irqrestore(&nc->lock, flags);
/* Clear entry @index from filter table @table on channel @nc: drop the
 * in-use bit and zero the entry's payload, under the channel lock.
 */
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
127 struct ncsi_channel_filter *ncf;
132 size = ncsi_filter_size(table);
136 ncf = nc->filters[table];
/* Missing table or out-of-range index: nothing to remove */
137 if (!ncf || index >= ncf->total)
140 spin_lock_irqsave(&nc->lock, flags);
141 bitmap = (void *)&ncf->bitmap;
/* Only wipe the payload if the bit was actually set */
142 if (test_and_clear_bit(index, bitmap))
143 memset(ncf->data + size * index, 0, size);
144 spin_unlock_irqrestore(&nc->lock, flags);
/* Report overall link state for the device by scanning every channel.
 * Sets the device back to the functional state, then looks for an
 * active channel whose link-status mode word reports link-up
 * (data[2] bit 0).  @force_down presumably forces a link-down report
 * regardless -- TODO confirm, the lines using it are elided here.
 */
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
151 struct ncsi_dev *nd = &ndp->ndev;
152 struct ncsi_package *np;
153 struct ncsi_channel *nc;
156 nd->state = ncsi_dev_state_functional;
163 NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 NCSI_FOR_EACH_CHANNEL(np, nc) {
165 spin_lock_irqsave(&nc->lock, flags);
/* Skip channels that are queued for processing or not active */
167 if (!list_empty(&nc->link) ||
168 nc->state != NCSI_CHANNEL_ACTIVE) {
169 spin_unlock_irqrestore(&nc->lock, flags);
/* Bit 0 of the link mode word means link-up */
173 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 spin_unlock_irqrestore(&nc->lock, flags);
179 spin_unlock_irqrestore(&nc->lock, flags);
/* Timer callback that keeps an eye on a channel's health.  Each tick it
 * either sends a Get Link Status (GLS) command (START/RETRY states) or,
 * once the WAIT window is exhausted, declares the channel dead and
 * requeues it for reconfiguration.  Re-arms itself every HZ at the end.
 * (excerpt: several lines, incl. the reads of `state`, are elided)
 */
187 static void ncsi_channel_monitor(unsigned long data)
189 struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_cmd_arg nca;
193 bool enabled, chained;
194 unsigned int monitor_state;
/* Snapshot monitor bookkeeping under the channel lock */
198 spin_lock_irqsave(&nc->lock, flags);
200 chained = !list_empty(&nc->link);
201 enabled = nc->monitor.enabled;
202 monitor_state = nc->monitor.state;
203 spin_unlock_irqrestore(&nc->lock, flags);
/* Monitor disabled, or channel already queued for processing: stop */
205 if (!enabled || chained)
207 if (state != NCSI_CHANNEL_INACTIVE &&
208 state != NCSI_CHANNEL_ACTIVE)
211 switch (monitor_state) {
212 case NCSI_CHANNEL_MONITOR_START:
213 case NCSI_CHANNEL_MONITOR_RETRY:
/* Probe the channel with a Get Link Status command */
215 nca.package = np->id;
216 nca.channel = nc->id;
217 nca.type = NCSI_PKT_CMD_GLS;
219 ret = ncsi_xmit_cmd(&nca);
221 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
227 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
/* Timeout: without hardware arbitration, report link down and
 * request a reshuffle of active channels
 */
230 if (!(ndp->flags & NCSI_DEV_HWA) &&
231 state == NCSI_CHANNEL_ACTIVE) {
232 ncsi_report_link(ndp, true);
233 ndp->flags |= NCSI_DEV_RESHUFFLE;
236 spin_lock_irqsave(&nc->lock, flags);
237 nc->state = NCSI_CHANNEL_INVISIBLE;
238 spin_unlock_irqrestore(&nc->lock, flags);
/* Requeue the channel for (re)configuration.
 * NOTE(review): nc->state is written here under ndp->lock rather
 * than nc->lock -- verify against the upstream locking rules.
 */
240 spin_lock_irqsave(&ndp->lock, flags);
241 nc->state = NCSI_CHANNEL_INACTIVE;
242 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 spin_unlock_irqrestore(&ndp->lock, flags);
244 ncsi_process_next_channel(ndp);
248 spin_lock_irqsave(&nc->lock, flags);
250 spin_unlock_irqrestore(&nc->lock, flags);
/* Re-arm for the next monitoring tick */
251 mod_timer(&nc->monitor.timer, jiffies + HZ);
/* Enable the per-channel monitor and arm its timer one HZ out.
 * Starting an already-enabled monitor is flagged (WARN_ON_ONCE) since
 * it indicates a state-machine bug in the caller.
 */
254 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
258 spin_lock_irqsave(&nc->lock, flags);
259 WARN_ON_ONCE(nc->monitor.enabled);
260 nc->monitor.enabled = true;
261 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
262 spin_unlock_irqrestore(&nc->lock, flags);
/* First tick in one HZ; ncsi_channel_monitor() re-arms thereafter */
264 mod_timer(&nc->monitor.timer, jiffies + HZ);
/* Disable the per-channel monitor and synchronously cancel its timer.
 * A no-op when the monitor is already disabled.
 */
267 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
271 spin_lock_irqsave(&nc->lock, flags);
272 if (!nc->monitor.enabled) {
273 spin_unlock_irqrestore(&nc->lock, flags);
276 nc->monitor.enabled = false;
277 spin_unlock_irqrestore(&nc->lock, flags);
/* Must not hold nc->lock here: the timer callback takes it */
279 del_timer_sync(&nc->monitor.timer);
/* Look up a channel within package @np.  (excerpt: the id-match test
 * and return statements inside the loop are elided in this view)
 */
282 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
285 struct ncsi_channel *nc;
287 NCSI_FOR_EACH_CHANNEL(np, nc) {
/* Allocate and register channel @id under package @np.  If a racing
 * caller already added the id (checked under np->lock), the duplicate
 * path is taken (cleanup lines elided here).  GFP_ATOMIC: may be called
 * from a context that cannot sleep.
 */
295 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
297 struct ncsi_channel *nc, *tmp;
301 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
307 nc->state = NCSI_CHANNEL_INACTIVE;
308 nc->monitor.enabled = false;
/* Timer fires ncsi_channel_monitor() with the channel as its argument */
309 setup_timer(&nc->monitor.timer,
310 ncsi_channel_monitor, (unsigned long)nc);
311 spin_lock_init(&nc->lock);
312 INIT_LIST_HEAD(&nc->link);
/* Pre-seed each capability/mode slot with its own index */
313 for (index = 0; index < NCSI_CAP_MAX; index++)
314 nc->caps[index].index = index;
315 for (index = 0; index < NCSI_MODE_MAX; index++)
316 nc->modes[index].index = index;
/* Insert under the package lock, re-checking for a racing duplicate */
318 spin_lock_irqsave(&np->lock, flags);
319 tmp = ncsi_find_channel(np, id);
321 spin_unlock_irqrestore(&np->lock, flags);
326 list_add_tail_rcu(&nc->node, &np->channels);
328 spin_unlock_irqrestore(&np->lock, flags);
/* Tear down a channel: free its filter tables, stop its monitor, and
 * unlink it from the parent package.  (excerpt: the kfree of the
 * channel itself is elided after the list_del_rcu)
 */
333 static void ncsi_remove_channel(struct ncsi_channel *nc)
335 struct ncsi_package *np = nc->package;
336 struct ncsi_channel_filter *ncf;
340 /* Release filters */
341 spin_lock_irqsave(&nc->lock, flags);
342 for (i = 0; i < NCSI_FILTER_MAX; i++) {
343 ncf = nc->filters[i];
347 nc->filters[i] = NULL;
351 nc->state = NCSI_CHANNEL_INACTIVE;
352 spin_unlock_irqrestore(&nc->lock, flags);
/* Monitor must be stopped after dropping nc->lock (timer takes it) */
353 ncsi_stop_channel_monitor(nc);
355 /* Remove and free channel */
356 spin_lock_irqsave(&np->lock, flags);
357 list_del_rcu(&nc->node);
359 spin_unlock_irqrestore(&np->lock, flags);
/* Look up a package on device @ndp.  (excerpt: the id-match test and
 * return statements inside the loop are elided in this view)
 */
364 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
367 struct ncsi_package *np;
369 NCSI_FOR_EACH_PACKAGE(ndp, np) {
/* Allocate and register package @id on device @ndp, mirroring
 * ncsi_add_channel(): GFP_ATOMIC allocation, then insert under
 * ndp->lock with a duplicate re-check (duplicate cleanup elided).
 */
377 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
380 struct ncsi_package *np, *tmp;
383 np = kzalloc(sizeof(*np), GFP_ATOMIC);
389 spin_lock_init(&np->lock);
390 INIT_LIST_HEAD(&np->channels);
/* Insert under the device lock, re-checking for a racing duplicate */
392 spin_lock_irqsave(&ndp->lock, flags);
393 tmp = ncsi_find_package(ndp, id);
395 spin_unlock_irqrestore(&ndp->lock, flags);
400 list_add_tail_rcu(&np->node, &ndp->packages);
402 spin_unlock_irqrestore(&ndp->lock, flags);
/* Tear down a package: remove every child channel, then unlink the
 * package from the device.  (excerpt: the kfree of the package is
 * elided after the list_del_rcu)
 */
407 void ncsi_remove_package(struct ncsi_package *np)
409 struct ncsi_dev_priv *ndp = np->ndp;
410 struct ncsi_channel *nc, *tmp;
413 /* Release all child channels */
414 list_for_each_entry_safe(nc, tmp, &np->channels, node)
415 ncsi_remove_channel(nc);
417 /* Remove and free package */
418 spin_lock_irqsave(&ndp->lock, flags);
419 list_del_rcu(&np->node);
421 spin_unlock_irqrestore(&ndp->lock, flags);
/* Resolve a combined NC-SI id into its package and channel: the package
 * index and channel index are extracted from @id via the
 * NCSI_PACKAGE_INDEX()/NCSI_CHANNEL_INDEX() macros.  Either output may
 * be NULL when no match exists (the stores into *np/*nc are elided
 * in this excerpt).
 */
426 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
428 struct ncsi_package **np,
429 struct ncsi_channel **nc)
431 struct ncsi_package *p;
432 struct ncsi_channel *c;
434 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
/* Only search for the channel if the package was found */
435 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
443 /* For two consecutive NCSI commands, the packet IDs shouldn't
444  * be same. Otherwise, the bogus response might be replied. So
445  * the available IDs are allocated in round-robin fashion.
447 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
448 unsigned int req_flags)
450 struct ncsi_request *nr = NULL;
451 int i, limit = ARRAY_SIZE(ndp->requests);
454 /* Check if there is one available request until the ceiling */
455 spin_lock_irqsave(&ndp->lock, flags);
/* First pass: scan from the round-robin cursor up to the array end */
456 for (i = ndp->request_id; i < limit; i++) {
457 if (ndp->requests[i].used)
460 nr = &ndp->requests[i];
462 nr->flags = req_flags;
/* Advance the cursor so the next allocation gets a different id */
463 ndp->request_id = i + 1;
467 /* Fail back to check from the starting cursor */
/* Second pass: wrap around and scan from the base index */
468 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
469 if (ndp->requests[i].used)
472 nr = &ndp->requests[i];
474 nr->flags = req_flags;
475 ndp->request_id = i + 1;
480 spin_unlock_irqrestore(&ndp->lock, flags);
/* Return a request slot to the pool: cancel its timeout timer, clear its
 * bookkeeping under ndp->lock, and free the associated skbs (the field
 * resets and kfree_skb calls are elided in this excerpt).  When the last
 * pending event-driven request completes, kick the state-machine work.
 */
484 void ncsi_free_request(struct ncsi_request *nr)
486 struct ncsi_dev_priv *ndp = nr->ndp;
487 struct sk_buff *cmd, *rsp;
/* Ensure the timeout handler is not running before freeing */
493 del_timer_sync(&nr->timer);
496 spin_lock_irqsave(&ndp->lock, flags);
502 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
503 spin_unlock_irqrestore(&ndp->lock, flags);
/* NOTE(review): pending_req_num is decremented outside ndp->lock here --
 * confirm its synchronization rules against upstream.
 */
505 if (driven && cmd && --ndp->pending_req_num == 0)
506 schedule_work(&ndp->work);
508 /* Release command and response */
/* Map a net_device back to its registered NC-SI device by scanning the
 * global ncsi_dev_list.  (excerpt: the return statements are elided)
 */
513 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
515 struct ncsi_dev_priv *ndp;
517 NCSI_FOR_EACH_DEV(ndp) {
518 if (ndp->ndev.dev == dev)
/* Timer callback fired when a request got no response in time.  If a
 * response already arrived (or there is no outstanding command), the
 * response handler owns the release; otherwise free the request here.
 */
525 static void ncsi_request_timeout(unsigned long data)
527 struct ncsi_request *nr = (struct ncsi_request *)data;
528 struct ncsi_dev_priv *ndp = nr->ndp;
531 /* If the request already had associated response,
532  * let the response handler to release it.
534 spin_lock_irqsave(&ndp->lock, flags);
536 if (nr->rsp || !nr->cmd) {
537 spin_unlock_irqrestore(&ndp->lock, flags);
540 spin_unlock_irqrestore(&ndp->lock, flags);
542 /* Release the request */
543 ncsi_free_request(nr);
/* Suspend state machine for the active channel.  Driven repeatedly from
 * ncsi_dev_work(): each invocation handles one ncsi_dev_state_suspend_*
 * step, issues the corresponding NC-SI command (SP, GLS, DCNT, DC, DP)
 * and advances nd->state for the next pass.  On the final step the
 * channel is marked INACTIVE and the next queued channel is processed.
 * (excerpt: the switch statement header, breaks, and error labels are
 * elided in this view)
 */
546 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
548 struct ncsi_dev *nd = &ndp->ndev;
549 struct ncsi_package *np = ndp->active_package;
550 struct ncsi_channel *nc = ndp->active_channel;
551 struct ncsi_cmd_arg nca;
/* All commands in this sequence are event-driven (completion kicks work) */
556 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
558 case ncsi_dev_state_suspend:
559 nd->state = ncsi_dev_state_suspend_select;
561 case ncsi_dev_state_suspend_select:
562 ndp->pending_req_num = 1;
/* Select Package, addressed to the reserved (package-level) channel */
564 nca.type = NCSI_PKT_CMD_SP;
565 nca.package = np->id;
566 nca.channel = NCSI_RESERVED_CHANNEL;
567 if (ndp->flags & NCSI_DEV_HWA)
572 /* To retrieve the last link states of channels in current
573  * package when current active channel needs fail over to
574  * another one. It means we will possibly select another
575  * channel as next active one. The link states of channels
576  * are most important factor of the selection. So we need
577  * accurate link states. Unfortunately, the link states on
578  * inactive channels can't be updated with LSC AEN in time.
580 if (ndp->flags & NCSI_DEV_RESHUFFLE)
581 nd->state = ncsi_dev_state_suspend_gls;
583 nd->state = ncsi_dev_state_suspend_dcnt;
584 ret = ncsi_xmit_cmd(&nca);
589 case ncsi_dev_state_suspend_gls:
/* One Get Link Status per channel in the package */
590 ndp->pending_req_num = np->channel_num;
592 nca.type = NCSI_PKT_CMD_GLS;
593 nca.package = np->id;
595 nd->state = ncsi_dev_state_suspend_dcnt;
596 NCSI_FOR_EACH_CHANNEL(np, nc) {
597 nca.channel = nc->id;
598 ret = ncsi_xmit_cmd(&nca);
604 case ncsi_dev_state_suspend_dcnt:
605 ndp->pending_req_num = 1;
/* Disable Channel Network TX */
607 nca.type = NCSI_PKT_CMD_DCNT;
608 nca.package = np->id;
609 nca.channel = nc->id;
611 nd->state = ncsi_dev_state_suspend_dc;
612 ret = ncsi_xmit_cmd(&nca);
617 case ncsi_dev_state_suspend_dc:
618 ndp->pending_req_num = 1;
/* Disable Channel */
620 nca.type = NCSI_PKT_CMD_DC;
621 nca.package = np->id;
622 nca.channel = nc->id;
625 nd->state = ncsi_dev_state_suspend_deselect;
626 ret = ncsi_xmit_cmd(&nca);
631 case ncsi_dev_state_suspend_deselect:
632 ndp->pending_req_num = 1;
/* Deselect Package */
634 nca.type = NCSI_PKT_CMD_DP;
635 nca.package = np->id;
636 nca.channel = NCSI_RESERVED_CHANNEL;
638 nd->state = ncsi_dev_state_suspend_done;
639 ret = ncsi_xmit_cmd(&nca);
644 case ncsi_dev_state_suspend_done:
/* Channel fully quiesced; move on to the next queued channel */
645 spin_lock_irqsave(&nc->lock, flags);
646 nc->state = NCSI_CHANNEL_INACTIVE;
647 spin_unlock_irqrestore(&nc->lock, flags);
648 ncsi_process_next_channel(ndp);
652 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
/* Error path: fall back to the functional state */
658 nd->state = ncsi_dev_state_functional;
661 /* Check the VLAN filter bitmap for a set filter, and construct a
662  * "Set VLAN Filter - Disable" packet if found.
664 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
665 struct ncsi_cmd_arg *nca)
/* data == NULL: find the first active VLAN filter entry, whatever it is */
671 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
673 /* Filter table empty */
677 data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
679 netdev_err(ndp->ndev.dev,
680 "ncsi: failed to retrieve filter %d\n", index);
681 /* Set the VLAN id to 0 - this will still disable the entry in
682  * the filter table, but we won't know what it was.
689 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
690 "ncsi: removed vlan tag %u at index %d\n",
/* Drop our software copy, then build the SVF-disable command */
692 ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
694 nca->type = NCSI_PKT_CMD_SVF;
696 /* HW filter index starts at 1 */
697 nca->bytes[6] = index + 1;
/* 0x00 = disable this filter entry */
698 nca->bytes[7] = 0x00;
702 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
705 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
706 struct ncsi_cmd_arg *nca)
708 struct vlan_vid *vlan = NULL;
/* Walk the registered VLAN ids looking for one not yet in the HW filter */
711 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
712 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
715 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
716 "ncsi: new vlan id to set: %u\n",
720 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
721 "vid %u already at filter pos %d\n",
/* Nothing left to program (list exhausted or last vid already set) */
725 if (!vlan || index >= 0) {
726 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
727 "no vlan ids left to set\n");
/* Record the vid in our software filter table first */
731 index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
733 netdev_err(ndp->ndev.dev,
734 "Failed to add new VLAN tag, error %d\n", index);
735 if (index == -ENOSPC)
736 netdev_err(ndp->ndev.dev,
737 "Channel %u already has all VLAN filters set\n",
742 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
743 "ncsi: set vid %u in packet, index %u\n",
744 vlan->vid, index + 1);
/* Build the SVF-enable command for this vid */
745 nca->type = NCSI_PKT_CMD_SVF;
746 nca->words[1] = vlan->vid;
747 /* HW filter index starts at 1 */
748 nca->bytes[6] = index + 1;
/* 0x01 = enable this filter entry */
749 nca->bytes[7] = 0x01;
/* Configuration state machine for the active channel.  Driven from
 * ncsi_dev_work(): each pass handles one ncsi_dev_state_config_* step
 * (select package, clear init state, program VLAN/MAC/broadcast/
 * multicast filters, enable counters/channel/AEN, get link status) and
 * advances nd->state.  The final step activates or deactivates the
 * channel based on its reported link state.  (excerpt: switch header,
 * breaks and error labels are elided in this view)
 */
754 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
756 struct ncsi_dev *nd = &ndp->ndev;
757 struct net_device *dev = nd->dev;
758 struct ncsi_package *np = ndp->active_package;
759 struct ncsi_channel *nc = ndp->active_channel;
760 struct ncsi_channel *hot_nc = NULL;
761 struct ncsi_cmd_arg nca;
767 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
769 case ncsi_dev_state_config:
770 case ncsi_dev_state_config_sp:
771 ndp->pending_req_num = 1;
773 /* Select the specific package */
774 nca.type = NCSI_PKT_CMD_SP;
775 if (ndp->flags & NCSI_DEV_HWA)
779 nca.package = np->id;
780 nca.channel = NCSI_RESERVED_CHANNEL;
781 ret = ncsi_xmit_cmd(&nca);
785 nd->state = ncsi_dev_state_config_cis;
787 case ncsi_dev_state_config_cis:
788 ndp->pending_req_num = 1;
790 /* Clear initial state */
791 nca.type = NCSI_PKT_CMD_CIS;
792 nca.package = np->id;
793 nca.channel = nc->id;
794 ret = ncsi_xmit_cmd(&nca);
798 nd->state = ncsi_dev_state_config_clear_vids;
/* The following states share one command-transmit tail (line 892) */
800 case ncsi_dev_state_config_clear_vids:
801 case ncsi_dev_state_config_svf:
802 case ncsi_dev_state_config_ev:
803 case ncsi_dev_state_config_sma:
804 case ncsi_dev_state_config_ebf:
805 #if IS_ENABLED(CONFIG_IPV6)
806 case ncsi_dev_state_config_egmf:
808 case ncsi_dev_state_config_ecnt:
809 case ncsi_dev_state_config_ec:
810 case ncsi_dev_state_config_ae:
811 case ncsi_dev_state_config_gls:
812 ndp->pending_req_num = 1;
814 nca.package = np->id;
815 nca.channel = nc->id;
817 /* Clear any active filters on the channel before setting */
818 if (nd->state == ncsi_dev_state_config_clear_vids) {
819 ret = clear_one_vid(ndp, nc, &nca);
/* Nothing to clear: skip ahead to programming VLAN filters */
821 nd->state = ncsi_dev_state_config_svf;
822 schedule_work(&ndp->work);
/* Stay in this state to clear one filter per pass */
826 nd->state = ncsi_dev_state_config_clear_vids;
827 /* Add known VLAN tags to the filter */
828 } else if (nd->state == ncsi_dev_state_config_svf) {
829 ret = set_one_vid(ndp, nc, &nca);
/* All vids programmed: move on to enabling the VLAN filter */
831 nd->state = ncsi_dev_state_config_ev;
832 schedule_work(&ndp->work);
/* Stay in this state to set one vid per pass */
836 nd->state = ncsi_dev_state_config_svf;
837 /* Enable/Disable the VLAN filter */
838 } else if (nd->state == ncsi_dev_state_config_ev) {
839 if (list_empty(&ndp->vlan_vids)) {
840 nca.type = NCSI_PKT_CMD_DV;
842 nca.type = NCSI_PKT_CMD_EV;
843 nca.bytes[3] = NCSI_CAP_VLAN_NO;
845 nd->state = ncsi_dev_state_config_sma;
846 } else if (nd->state == ncsi_dev_state_config_sma) {
847 /* Use first entry in unicast filter table. Note that
848  * the MAC filter table starts from entry 1 instead of
851 nca.type = NCSI_PKT_CMD_SMA;
852 for (index = 0; index < 6; index++)
853 nca.bytes[index] = dev->dev_addr[index];
856 nd->state = ncsi_dev_state_config_ebf;
857 } else if (nd->state == ncsi_dev_state_config_ebf) {
/* Enable broadcast filtering with the channel's advertised caps */
858 nca.type = NCSI_PKT_CMD_EBF;
859 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
860 nd->state = ncsi_dev_state_config_ecnt;
861 #if IS_ENABLED(CONFIG_IPV6)
/* Only program global multicast if IPv6 addresses exist and the
 * channel reports multicast capability
 */
862 if (ndp->inet6_addr_num > 0 &&
863 (nc->caps[NCSI_CAP_GENERIC].cap &
864 NCSI_CAP_GENERIC_MC))
865 nd->state = ncsi_dev_state_config_egmf;
867 nd->state = ncsi_dev_state_config_ecnt;
868 } else if (nd->state == ncsi_dev_state_config_egmf) {
869 nca.type = NCSI_PKT_CMD_EGMF;
870 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
871 nd->state = ncsi_dev_state_config_ecnt;
872 #endif /* CONFIG_IPV6 */
873 } else if (nd->state == ncsi_dev_state_config_ecnt) {
874 nca.type = NCSI_PKT_CMD_ECNT;
875 nd->state = ncsi_dev_state_config_ec;
876 } else if (nd->state == ncsi_dev_state_config_ec) {
877 /* Enable AEN if it's supported */
878 nca.type = NCSI_PKT_CMD_EC;
879 nd->state = ncsi_dev_state_config_ae;
/* Skip the AE step when the channel advertises no AEN support */
880 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
881 nd->state = ncsi_dev_state_config_gls;
882 } else if (nd->state == ncsi_dev_state_config_ae) {
883 nca.type = NCSI_PKT_CMD_AE;
885 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
886 nd->state = ncsi_dev_state_config_gls;
887 } else if (nd->state == ncsi_dev_state_config_gls) {
888 nca.type = NCSI_PKT_CMD_GLS;
889 nd->state = ncsi_dev_state_config_done;
/* Shared transmit for whichever command the branch above built */
892 ret = ncsi_xmit_cmd(&nca);
896 case ncsi_dev_state_config_done:
897 spin_lock_irqsave(&nc->lock, flags);
898 if (nc->reconfigure_needed) {
899 /* This channel's configuration has been updated
900  * part-way during the config state - start the
901  * channel configuration over
903 nc->reconfigure_needed = false;
904 nc->state = NCSI_CHANNEL_INACTIVE;
905 spin_unlock_irqrestore(&nc->lock, flags);
907 spin_lock_irqsave(&ndp->lock, flags);
908 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
909 spin_unlock_irqrestore(&ndp->lock, flags);
911 netdev_printk(KERN_DEBUG, dev,
912 "Dirty NCSI channel state reset\n");
913 ncsi_process_next_channel(ndp);
/* Link up -> channel becomes ACTIVE; otherwise stays INACTIVE */
917 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
919 nc->state = NCSI_CHANNEL_ACTIVE;
922 nc->state = NCSI_CHANNEL_INACTIVE;
924 spin_unlock_irqrestore(&nc->lock, flags);
926 /* Update the hot channel */
927 spin_lock_irqsave(&ndp->lock, flags);
928 ndp->hot_channel = hot_nc;
929 spin_unlock_irqrestore(&ndp->lock, flags);
931 ncsi_start_channel_monitor(nc);
932 ncsi_process_next_channel(ndp);
935 netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
/* Error path: report link down */
942 ncsi_report_link(ndp, true);
/* Pick the next channel to activate: scan all inactive, unqueued
 * channels preferring one with link up (the selection between link-up,
 * hot-channel and fallback candidates is partly elided here), queue it,
 * and kick the processing loop.  Reports link down when no candidate
 * exists.
 */
945 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
947 struct ncsi_package *np;
948 struct ncsi_channel *nc, *found, *hot_nc;
949 struct ncsi_channel_mode *ncm;
/* Snapshot the previously preferred ("hot") channel */
952 spin_lock_irqsave(&ndp->lock, flags);
953 hot_nc = ndp->hot_channel;
954 spin_unlock_irqrestore(&ndp->lock, flags);
956 /* The search is done once an inactive channel with up
960 NCSI_FOR_EACH_PACKAGE(ndp, np) {
961 NCSI_FOR_EACH_CHANNEL(np, nc) {
962 spin_lock_irqsave(&nc->lock, flags);
/* Skip channels already queued or not inactive */
964 if (!list_empty(&nc->link) ||
965 nc->state != NCSI_CHANNEL_INACTIVE) {
966 spin_unlock_irqrestore(&nc->lock, flags);
/* Link-up channel (mode word bit 0) ends the search */
976 ncm = &nc->modes[NCSI_MODE_LINK];
977 if (ncm->data[2] & 0x1) {
978 spin_unlock_irqrestore(&nc->lock, flags);
983 spin_unlock_irqrestore(&nc->lock, flags);
/* No candidate at all: report link down */
988 ncsi_report_link(ndp, true);
/* Queue the chosen channel and start processing it */
993 spin_lock_irqsave(&ndp->lock, flags);
994 list_add_tail_rcu(&found->link, &ndp->channel_queue);
995 spin_unlock_irqrestore(&ndp->lock, flags);
997 return ncsi_process_next_channel(ndp);
/* Decide whether hardware arbitration (HWA) can be used: every channel
 * must explicitly advertise support, otherwise the NCSI_DEV_HWA flag is
 * cleared and false is returned (returns elided in this excerpt).
 */
1000 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1002 struct ncsi_package *np;
1003 struct ncsi_channel *nc;
1006 /* The hardware arbitration is disabled if any one channel
1007  * doesn't support explicitly.
1009 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1010 NCSI_FOR_EACH_CHANNEL(np, nc) {
1011 cap = nc->caps[NCSI_CAP_GENERIC].cap;
/* Must both have the HWA bit and report the "supported" scheme */
1012 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1013 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1014 NCSI_CAP_GENERIC_HWA_SUPPORT) {
1015 ndp->flags &= ~NCSI_DEV_HWA;
/* All channels agree: enable hardware arbitration */
1021 ndp->flags |= NCSI_DEV_HWA;
/* With hardware arbitration, every channel can be brought up: queue all
 * of them for configuration and start processing.  Channels are expected
 * to be inactive and unqueued at this point (WARN otherwise).
 */
1025 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1027 struct ncsi_package *np;
1028 struct ncsi_channel *nc;
1029 unsigned long flags;
1031 /* Move all available channels to processing queue */
1032 spin_lock_irqsave(&ndp->lock, flags);
1033 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1034 NCSI_FOR_EACH_CHANNEL(np, nc) {
1035 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1036 !list_empty(&nc->link));
1037 ncsi_stop_channel_monitor(nc);
1038 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1041 spin_unlock_irqrestore(&ndp->lock, flags);
1043 /* We can have no channels in extremely case */
1044 if (list_empty(&ndp->channel_queue)) {
1045 ncsi_report_link(ndp, false);
1049 return ncsi_process_next_channel(ndp);
/* Probe/enumeration state machine.  Driven from ncsi_dev_work(): each
 * pass handles one ncsi_dev_state_probe_* step -- deselect all packages,
 * select each package in turn, clear channel init state, gather version
 * (GVI), capabilities (GC) and link status (GLS) per channel, then
 * deselect and move to the next package.  Once every package has been
 * enumerated, HWA is evaluated and an active channel is chosen.
 * (excerpt: breaks and error labels are elided in this view)
 */
1052 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1054 struct ncsi_dev *nd = &ndp->ndev;
1055 struct ncsi_package *np;
1056 struct ncsi_channel *nc;
1057 struct ncsi_cmd_arg nca;
1058 unsigned char index;
1062 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1063 switch (nd->state) {
1064 case ncsi_dev_state_probe:
1065 nd->state = ncsi_dev_state_probe_deselect;
1067 case ncsi_dev_state_probe_deselect:
/* One DP per possible package id (0..7) */
1068 ndp->pending_req_num = 8;
1070 /* Deselect all possible packages */
1071 nca.type = NCSI_PKT_CMD_DP;
1072 nca.channel = NCSI_RESERVED_CHANNEL;
1073 for (index = 0; index < 8; index++) {
1074 nca.package = index;
1075 ret = ncsi_xmit_cmd(&nca);
1080 nd->state = ncsi_dev_state_probe_package;
1082 case ncsi_dev_state_probe_package:
/* 8 selects + 8 deselects outstanding */
1083 ndp->pending_req_num = 16;
1085 /* Select all possible packages */
1086 nca.type = NCSI_PKT_CMD_SP;
1088 nca.channel = NCSI_RESERVED_CHANNEL;
1089 for (index = 0; index < 8; index++) {
1090 nca.package = index;
1091 ret = ncsi_xmit_cmd(&nca);
1096 /* Disable all possible packages */
1097 nca.type = NCSI_PKT_CMD_DP;
1098 for (index = 0; index < 8; index++) {
1099 nca.package = index;
1100 ret = ncsi_xmit_cmd(&nca);
1105 nd->state = ncsi_dev_state_probe_channel;
1107 case ncsi_dev_state_probe_channel:
/* Iterate over discovered packages one at a time */
1108 if (!ndp->active_package)
1109 ndp->active_package = list_first_or_null_rcu(
1110 &ndp->packages, struct ncsi_package, node);
1111 else if (list_is_last(&ndp->active_package->node,
1113 ndp->active_package = NULL;
1115 ndp->active_package = list_next_entry(
1116 ndp->active_package, node);
1118 /* All available packages and channels are enumerated. The
1119  * enumeration happens for once when the NCSI interface is
1120  * started. So we need continue to start the interface after
1123  * We have to choose an active channel before configuring it.
1124  * Note that we possibly don't have active channel in extreme
1127 if (!ndp->active_package) {
1128 ndp->flags |= NCSI_DEV_PROBED;
1129 if (ncsi_check_hwa(ndp))
1130 ncsi_enable_hwa(ndp);
1132 ncsi_choose_active_channel(ndp);
1136 /* Select the active package */
1137 ndp->pending_req_num = 1;
1138 nca.type = NCSI_PKT_CMD_SP;
1140 nca.package = ndp->active_package->id;
1141 nca.channel = NCSI_RESERVED_CHANNEL;
1142 ret = ncsi_xmit_cmd(&nca);
1146 nd->state = ncsi_dev_state_probe_cis;
1148 case ncsi_dev_state_probe_cis:
/* One CIS per possible channel id in the package */
1149 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1151 /* Clear initial state */
1152 nca.type = NCSI_PKT_CMD_CIS;
1153 nca.package = ndp->active_package->id;
1154 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1155 nca.channel = index;
1156 ret = ncsi_xmit_cmd(&nca);
1161 nd->state = ncsi_dev_state_probe_gvi;
/* GVI, GC and GLS share the same per-channel broadcast below */
1163 case ncsi_dev_state_probe_gvi:
1164 case ncsi_dev_state_probe_gc:
1165 case ncsi_dev_state_probe_gls:
1166 np = ndp->active_package;
1167 ndp->pending_req_num = np->channel_num;
1169 /* Retrieve version, capability or link status */
1170 if (nd->state == ncsi_dev_state_probe_gvi)
1171 nca.type = NCSI_PKT_CMD_GVI;
1172 else if (nd->state == ncsi_dev_state_probe_gc)
1173 nca.type = NCSI_PKT_CMD_GC;
1175 nca.type = NCSI_PKT_CMD_GLS;
1177 nca.package = np->id;
1178 NCSI_FOR_EACH_CHANNEL(np, nc) {
1179 nca.channel = nc->id;
1180 ret = ncsi_xmit_cmd(&nca);
/* Advance GVI -> GC -> GLS -> DP */
1185 if (nd->state == ncsi_dev_state_probe_gvi)
1186 nd->state = ncsi_dev_state_probe_gc;
1187 else if (nd->state == ncsi_dev_state_probe_gc)
1188 nd->state = ncsi_dev_state_probe_gls;
1190 nd->state = ncsi_dev_state_probe_dp;
1192 case ncsi_dev_state_probe_dp:
1193 ndp->pending_req_num = 1;
1195 /* Deselect the active package */
1196 nca.type = NCSI_PKT_CMD_DP;
1197 nca.package = ndp->active_package->id;
1198 nca.channel = NCSI_RESERVED_CHANNEL;
1199 ret = ncsi_xmit_cmd(&nca);
1203 /* Scan channels in next package */
1204 nd->state = ncsi_dev_state_probe_channel;
1207 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
/* Error path: report link down */
1213 ncsi_report_link(ndp, true);
/* Workqueue entry point: dispatch to the probe/suspend/configure state
 * machine based on the major part of the current device state.
 */
1216 static void ncsi_dev_work(struct work_struct *work)
1218 struct ncsi_dev_priv *ndp = container_of(work,
1219 struct ncsi_dev_priv, work);
1220 struct ncsi_dev *nd = &ndp->ndev;
/* Mask off the minor bits; each handler consumes the full sub-state */
1222 switch (nd->state & ncsi_dev_state_major) {
1223 case ncsi_dev_state_probe:
1224 ncsi_probe_channel(ndp);
1226 case ncsi_dev_state_suspend:
1227 ncsi_suspend_channel(ndp);
1229 case ncsi_dev_state_config:
1230 ncsi_configure_channel(ndp);
1233 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
/* Pop the next channel off ndp->channel_queue and act on its previous
 * state: an INACTIVE channel is configured, an ACTIVE one is suspended.
 * While being processed the channel is marked INVISIBLE.  When the
 * queue is empty, either trigger a reshuffle (if requested) or report
 * the final link state.
 */
1238 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1240 struct ncsi_channel *nc;
1242 unsigned long flags;
1244 spin_lock_irqsave(&ndp->lock, flags);
1245 nc = list_first_or_null_rcu(&ndp->channel_queue,
1246 struct ncsi_channel, link);
/* Empty queue: fall through to the no-channel handling below */
1248 spin_unlock_irqrestore(&ndp->lock, flags);
1252 list_del_init(&nc->link);
1253 spin_unlock_irqrestore(&ndp->lock, flags);
/* Hide the channel from other paths while it is being processed */
1255 spin_lock_irqsave(&nc->lock, flags);
1256 old_state = nc->state;
1257 nc->state = NCSI_CHANNEL_INVISIBLE;
1258 spin_unlock_irqrestore(&nc->lock, flags);
1260 ndp->active_channel = nc;
1261 ndp->active_package = nc->package;
1263 switch (old_state) {
1264 case NCSI_CHANNEL_INACTIVE:
1265 ndp->ndev.state = ncsi_dev_state_config;
1266 ncsi_configure_channel(ndp);
1268 case NCSI_CHANNEL_ACTIVE:
1269 ndp->ndev.state = ncsi_dev_state_suspend;
1270 ncsi_suspend_channel(ndp);
1273 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1274 old_state, nc->package->id, nc->id);
1275 ncsi_report_link(ndp, false);
/* No channel to process: clear the active pointers */
1282 ndp->active_channel = NULL;
1283 ndp->active_package = NULL;
1284 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1285 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1286 return ncsi_choose_active_channel(ndp);
1289 ncsi_report_link(ndp, false);
1293 #if IS_ENABLED(CONFIG_IPV6)
/* inet6addr notifier: when the first global IPv6 address appears on an
 * NC-SI-managed interface, enable the global multicast filter (EGMF);
 * when the last one disappears, disable it (DGMF).  Link-local and
 * loopback addresses are ignored.  (excerpt: the NETDEV_UP/DOWN switch
 * header is elided between lines 1308 and 1313)
 */
1294 static int ncsi_inet6addr_event(struct notifier_block *this,
1295 unsigned long event, void *data)
1297 struct inet6_ifaddr *ifa = data;
1298 struct net_device *dev = ifa->idev->dev;
1299 struct ncsi_dev *nd = ncsi_find_dev(dev);
1300 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1301 struct ncsi_package *np;
1302 struct ncsi_channel *nc;
1303 struct ncsi_cmd_arg nca;
/* Not an NC-SI device, or an address class we don't care about */
1307 if (!ndp || (ipv6_addr_type(&ifa->addr) &
1308 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
/* Only act on the 0 -> 1 and 1 -> 0 address-count transitions */
1313 action = (++ndp->inet6_addr_num) == 1;
1314 nca.type = NCSI_PKT_CMD_EGMF;
1317 action = (--ndp->inet6_addr_num == 0);
1318 nca.type = NCSI_PKT_CMD_DGMF;
1324 /* We might not have active channel or packages. The IPv6
1325  * required multicast will be enabled when active channel
1326  * or packages are chosen.
1328 np = ndp->active_package;
1329 nc = ndp->active_channel;
1330 if (!action || !np || !nc)
1333 /* We needn't enable or disable it if the function isn't supported */
1334 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1339 nca.package = np->id;
1340 nca.channel = nc->id;
1341 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1342 ret = ncsi_xmit_cmd(&nca);
1344 netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1345 (event == NETDEV_UP) ? "enable" : "disable", ret);
/* Registered once for the whole driver (see ncsi_register_dev) */
1352 static struct notifier_block ncsi_inet6addr_notifier = {
1353 .notifier_call = ncsi_inet6addr_event,
1355 #endif /* CONFIG_IPV6 */
/* Force every channel to pick up updated configuration (e.g. a VLAN
 * change): idle channels are reset to INACTIVE and queued for
 * reconfiguration; busy channels are only marked reconfigure_needed so
 * the config state machine restarts them when done.  Presumably returns
 * the number of kicked channels (the counter lines are elided) --
 * callers treat a non-zero return as "work queued".
 */
1357 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1359 struct ncsi_dev *nd = &ndp->ndev;
1360 struct ncsi_channel *nc;
1361 struct ncsi_package *np;
1362 unsigned long flags;
1365 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1366 NCSI_FOR_EACH_CHANNEL(np, nc) {
1367 spin_lock_irqsave(&nc->lock, flags);
1369 /* Channels may be busy, mark dirty instead of
1371  * a) not ACTIVE (configured)
1372  * b) in the channel_queue (to be configured)
1373  * c) it's ndev is in the config state
1375 if (nc->state != NCSI_CHANNEL_ACTIVE) {
1376 if ((ndp->ndev.state & 0xff00) ==
1377 ncsi_dev_state_config ||
1378 !list_empty(&nc->link)) {
1379 netdev_printk(KERN_DEBUG, nd->dev,
1380 "ncsi: channel %p marked dirty\n",
/* Busy: defer -- the config-done path will requeue it */
1382 nc->reconfigure_needed = true;
1384 spin_unlock_irqrestore(&nc->lock, flags);
1388 spin_unlock_irqrestore(&nc->lock, flags);
/* Idle: reset and queue for immediate reconfiguration */
1390 ncsi_stop_channel_monitor(nc);
1391 spin_lock_irqsave(&nc->lock, flags);
1392 nc->state = NCSI_CHANNEL_INACTIVE;
1393 spin_unlock_irqrestore(&nc->lock, flags);
1395 spin_lock_irqsave(&ndp->lock, flags);
1396 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1397 spin_unlock_irqrestore(&ndp->lock, flags);
1399 netdev_printk(KERN_DEBUG, nd->dev,
1400 "ncsi: kicked channel %p\n", nc);
/* ndo_vlan_rx_add_vid hook: record @vid in the device's software VLAN
 * list (capped at NCSI_MAX_VLAN_VIDS), then kick the channels so the
 * new id gets programmed into the hardware filters.
 */
1408 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1410 struct ncsi_dev_priv *ndp;
1411 unsigned int n_vids = 0;
1412 struct vlan_vid *vlan;
1413 struct ncsi_dev *nd;
1419 nd = ncsi_find_dev(dev);
1421 netdev_warn(dev, "ncsi: No net_device?\n");
1425 ndp = TO_NCSI_DEV_PRIV(nd);
1427 /* Add the VLAN id to our internal list */
/* Duplicate ids are a no-op; count entries against the cap */
1428 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1430 if (vlan->vid == vid) {
1431 netdev_printk(KERN_DEBUG, dev,
1432 "vid %u already registered\n", vid);
1436 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1438 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1439 vid, NCSI_MAX_VLAN_VIDS);
/* GFP_KERNEL: this ndo callback runs in process context */
1443 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1447 vlan->proto = proto;
1449 list_add_rcu(&vlan->list, &ndp->vlan_vids);
1451 netdev_printk(KERN_DEBUG, dev, "Added new vid %u\n", vid);
/* Reconfigure channels so the new vid reaches the HW filter */
1453 found = ncsi_kick_channels(ndp) != 0;
1455 return found ? ncsi_process_next_channel(ndp) : 0;
/* ndo_vlan_rx_kill_vid hook: drop @vid from the device's software VLAN
 * list and kick the channels so the id is cleared from the hardware
 * filters.  Complains if the vid was never registered.
 */
1459 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1461 struct vlan_vid *vlan, *tmp;
1462 struct ncsi_dev_priv *ndp;
1463 struct ncsi_dev *nd;
1469 nd = ncsi_find_dev(dev);
1471 netdev_warn(dev, "ncsi: no net_device?\n");
1475 ndp = TO_NCSI_DEV_PRIV(nd);
1477 /* Remove the VLAN id from our internal list */
/* _safe variant: the matching entry is unlinked (and freed -- the
 * kfree line is elided in this excerpt) while iterating
 */
1478 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1479 if (vlan->vid == vid) {
1480 netdev_printk(KERN_DEBUG, dev,
1481 "vid %u found, removing\n", vid);
1482 list_del_rcu(&vlan->list);
1488 netdev_err(dev, "ncsi: vid %u wasn't registered!\n", vid);
/* Reconfigure channels so the removal reaches the HW filter */
1492 found = ncsi_kick_channels(ndp) != 0;
1494 return found ? ncsi_process_next_channel(ndp) : 0;
/* Create and register the NC-SI state for @dev.  Idempotent: if the
 * net_device is already registered its existing handle is returned
 * (return elided here).  Initializes the request pool and timers, hooks
 * the device into the global list (registering the IPv6 notifier for
 * the first device), and installs the NC-SI packet-type Rx handler.
 * @handler: driver callback invoked on NC-SI state transitions.
 */
1498 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1499 void (*handler)(struct ncsi_dev *ndev))
1501 struct ncsi_dev_priv *ndp;
1502 struct ncsi_dev *nd;
1503 unsigned long flags;
1506 /* Check if the device has been registered or not */
1507 nd = ncsi_find_dev(dev);
1511 /* Create NCSI device */
1512 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1517 nd->state = ncsi_dev_state_registered;
1519 nd->handler = handler;
1520 ndp->pending_req_num = 0;
1521 INIT_LIST_HEAD(&ndp->channel_queue);
1522 INIT_LIST_HEAD(&ndp->vlan_vids);
1523 INIT_WORK(&ndp->work, ncsi_dev_work);
1525 /* Initialize private NCSI device */
1526 spin_lock_init(&ndp->lock);
1527 INIT_LIST_HEAD(&ndp->packages);
1528 ndp->request_id = NCSI_REQ_START_IDX;
/* Pre-wire every request slot with its id and a timeout timer */
1529 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1530 ndp->requests[i].id = i;
1531 ndp->requests[i].ndp = ndp;
1532 setup_timer(&ndp->requests[i].timer,
1533 ncsi_request_timeout,
1534 (unsigned long)&ndp->requests[i]);
1537 spin_lock_irqsave(&ncsi_dev_lock, flags);
1538 #if IS_ENABLED(CONFIG_IPV6)
1539 ndp->inet6_addr_num = 0;
/* First NC-SI device: start listening for IPv6 address changes */
1540 if (list_empty(&ncsi_dev_list))
1541 register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1543 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1544 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1546 /* Register NCSI packet Rx handler */
1547 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1548 ndp->ptype.func = ncsi_rcv_rsp;
1549 ndp->ptype.dev = dev;
1550 dev_add_pack(&ndp->ptype);
/* Bring the NC-SI device up.  Only valid from the registered or
 * functional states.  First start triggers the probe/enumeration state
 * machine; subsequent starts either enable all channels (HWA) or choose
 * a single active channel.
 */
1556 int ncsi_start_dev(struct ncsi_dev *nd)
1558 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1561 if (nd->state != ncsi_dev_state_registered &&
1562 nd->state != ncsi_dev_state_functional)
/* First start: enumerate packages/channels via the workqueue */
1565 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1566 nd->state = ncsi_dev_state_probe;
1567 schedule_work(&ndp->work);
1571 if (ndp->flags & NCSI_DEV_HWA)
1572 ret = ncsi_enable_hwa(ndp);
1574 ret = ncsi_choose_active_channel(ndp);
/* Bring the NC-SI device down: stop every channel's monitor, force all
 * channel states to INACTIVE, and report link down.  Channels still
 * queued or mid-processing (INVISIBLE) indicate a bug (WARN).
 */
1580 void ncsi_stop_dev(struct ncsi_dev *nd)
1582 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1583 struct ncsi_package *np;
1584 struct ncsi_channel *nc;
1587 unsigned long flags;
1589 /* Stop the channel monitor and reset channel's state */
1590 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1591 NCSI_FOR_EACH_CHANNEL(np, nc) {
1592 ncsi_stop_channel_monitor(nc);
1594 spin_lock_irqsave(&nc->lock, flags);
1595 chained = !list_empty(&nc->link);
1596 old_state = nc->state;
1597 nc->state = NCSI_CHANNEL_INACTIVE;
1598 spin_unlock_irqrestore(&nc->lock, flags);
/* Being queued or INVISIBLE at stop time is a state-machine bug */
1600 WARN_ON_ONCE(chained ||
1601 old_state == NCSI_CHANNEL_INVISIBLE);
/* force_down=true: report link down to the driver */
1605 ncsi_report_link(ndp, true);
/* Tear down a registered NC-SI device: remove the Rx packet handler,
 * free all packages (and their channels), and unhook from the global
 * list -- unregistering the IPv6 notifier when this was the last
 * device.  (excerpt: the final kfree of ndp is elided)
 */
1609 void ncsi_unregister_dev(struct ncsi_dev *nd)
1611 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1612 struct ncsi_package *np, *tmp;
1613 unsigned long flags;
/* Stop receiving NC-SI packets before freeing state */
1615 dev_remove_pack(&ndp->ptype);
1617 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1618 ncsi_remove_package(np);
1620 spin_lock_irqsave(&ncsi_dev_lock, flags);
1621 list_del_rcu(&ndp->node);
1622 #if IS_ENABLED(CONFIG_IPV6)
/* Last NC-SI device gone: stop listening for IPv6 address changes */
1623 if (list_empty(&ncsi_dev_list))
1624 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1626 spin_unlock_irqrestore(&ncsi_dev_lock, flags);