2 * Copyright Gavin Shan, IBM Corporation 2016.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
20 #include <net/addrconf.h>
22 #include <net/if_inet6.h>
/* Global registry of all NCSI devices, protected by ncsi_dev_lock. */
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
/* Return the per-entry data size (bytes) for the given filter table index.
 * The sizes[] array must stay in step with the NCSI_FILTER_* enumeration,
 * enforced by the BUILD_BUG_ON below. Out-of-range tables are rejected
 * (error path elided in this view).
 */
30 static inline int ncsi_filter_size(int table)
32 int sizes[] = { 2, 6, 6, 6 };
34 BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
/* Return a pointer to the data of entry @index in channel @nc's filter
 * table @table. Validation of ncf/size is elided in this view; the
 * returned pointer addresses raw filter data, not a copy.
 */
41 u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
43 struct ncsi_channel_filter *ncf;
46 ncf = nc->filters[table];
50 size = ncsi_filter_size(table);
54 return ncf->data + size * index;
57 /* Find the first active filter in a filter table that matches the given
58 * data parameter. If data is NULL, this returns the first active filter.
 *
 * Returns the matching entry's index; the not-found return is elided in
 * this view. The scan walks set bits of the table's bitmap under nc->lock.
 */
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
62 struct ncsi_channel_filter *ncf;
67 ncf = nc->filters[table];
71 size = ncsi_filter_size(table);
75 spin_lock_irqsave(&nc->lock, flags);
76 bitmap = (void *)&ncf->bitmap;
/* Iterate only over in-use entries (set bits). */
78 while ((index = find_next_bit(bitmap, ncf->total, index + 1))
/* NULL data matches any active entry; otherwise compare raw bytes. */
80 if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 spin_unlock_irqrestore(&nc->lock, flags);
85 spin_unlock_irqrestore(&nc->lock, flags);
/* Add @data as a new entry in channel @nc's filter table @table.
 * If an identical entry already exists (ncsi_find_filter succeeds) the
 * early-return path is elided in this view. A free slot is claimed by
 * searching for a zero bit and atomically setting it; the loop retries
 * if test_and_set_bit races with another claimer.
 */
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
92 struct ncsi_channel_filter *ncf;
97 size = ncsi_filter_size(table);
/* Reuse an existing identical entry if present. */
101 index = ncsi_find_filter(nc, table, data);
105 ncf = nc->filters[table];
109 spin_lock_irqsave(&nc->lock, flags);
110 bitmap = (void *)&ncf->bitmap;
112 index = find_next_zero_bit(bitmap, ncf->total, 0);
113 if (index >= ncf->total) {
/* Table is full. */
114 spin_unlock_irqrestore(&nc->lock, flags);
117 } while (test_and_set_bit(index, bitmap));
119 memcpy(ncf->data + size * index, data, size);
120 spin_unlock_irqrestore(&nc->lock, flags);
/* Remove entry @index from channel @nc's filter table @table: clear its
 * in-use bit and zero the entry's data. Harmless if the bit was already
 * clear (test_and_clear_bit guards the memset).
 */
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
127 struct ncsi_channel_filter *ncf;
132 size = ncsi_filter_size(table);
136 ncf = nc->filters[table];
137 if (!ncf || index >= ncf->total)
140 spin_lock_irqsave(&nc->lock, flags);
141 bitmap = (void *)&ncf->bitmap;
142 if (test_and_clear_bit(index, bitmap))
143 memset(ncf->data + size * index, 0, size);
144 spin_unlock_irqrestore(&nc->lock, flags);
/* Report the aggregate link state to the NCSI consumer: the device is
 * marked functional, then all active channels are scanned for one with
 * link up (NCSI_MODE_LINK data[2] bit 0). The handling of @force_down
 * and the final notification call are elided in this view.
 */
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
151 struct ncsi_dev *nd = &ndp->ndev;
152 struct ncsi_package *np;
153 struct ncsi_channel *nc;
156 nd->state = ncsi_dev_state_functional;
163 NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 NCSI_FOR_EACH_CHANNEL(np, nc) {
165 spin_lock_irqsave(&nc->lock, flags);
/* Only settled, active channels count toward link state. */
167 if (!list_empty(&nc->link) ||
168 nc->state != NCSI_CHANNEL_ACTIVE) {
169 spin_unlock_irqrestore(&nc->lock, flags);
/* Bit 0 of the link-status word indicates link up. */
173 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 spin_unlock_irqrestore(&nc->lock, flags);
179 spin_unlock_irqrestore(&nc->lock, flags);
/* Timer callback that monitors a channel's health by periodically sending
 * Get Link Status (GLS) commands. State is snapshotted under nc->lock,
 * then acted on outside the lock. If the monitor exceeds its retry window
 * the channel is torn down and requeued for reconfiguration.
 *
 * NOTE(review): 'state' is read below but its assignment is not visible
 * in this view — presumably captured from nc->state inside the locked
 * region above; confirm against the full source.
 */
187 static void ncsi_channel_monitor(unsigned long data)
189 struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_cmd_arg nca;
193 bool enabled, chained;
194 unsigned int monitor_state;
198 spin_lock_irqsave(&nc->lock, flags);
200 chained = !list_empty(&nc->link);
201 enabled = nc->monitor.enabled;
202 monitor_state = nc->monitor.state;
203 spin_unlock_irqrestore(&nc->lock, flags);
/* Stop silently if monitoring was disabled or the channel is queued. */
205 if (!enabled || chained)
207 if (state != NCSI_CHANNEL_INACTIVE &&
208 state != NCSI_CHANNEL_ACTIVE)
211 switch (monitor_state) {
212 case NCSI_CHANNEL_MONITOR_START:
213 case NCSI_CHANNEL_MONITOR_RETRY:
/* Probe the channel with a Get Link Status command. */
215 nca.package = np->id;
216 nca.channel = nc->id;
217 nca.type = NCSI_PKT_CMD_GLS;
219 ret = ncsi_xmit_cmd(&nca);
221 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
227 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
/* Timed out: report link down and, without hardware arbitration,
 * request a reshuffle so another channel can take over.
 */
230 if (!(ndp->flags & NCSI_DEV_HWA) &&
231 state == NCSI_CHANNEL_ACTIVE) {
232 ncsi_report_link(ndp, true);
233 ndp->flags |= NCSI_DEV_RESHUFFLE;
/* Hide the channel while it is being recycled... */
236 spin_lock_irqsave(&nc->lock, flags);
237 nc->state = NCSI_CHANNEL_INVISIBLE;
238 spin_unlock_irqrestore(&nc->lock, flags);
/* ...then mark it inactive and queue it for reprocessing. */
240 spin_lock_irqsave(&ndp->lock, flags);
241 nc->state = NCSI_CHANNEL_INACTIVE;
242 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 spin_unlock_irqrestore(&ndp->lock, flags);
244 ncsi_process_next_channel(ndp);
/* Advance the monitor state (elided) and re-arm for one second. */
248 spin_lock_irqsave(&nc->lock, flags);
250 spin_unlock_irqrestore(&nc->lock, flags);
251 mod_timer(&nc->monitor.timer, jiffies + HZ);
/* Enable the per-channel health monitor and arm its timer to fire in
 * one second. Warns if the monitor was already running (double start).
 */
254 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
258 spin_lock_irqsave(&nc->lock, flags);
259 WARN_ON_ONCE(nc->monitor.enabled);
260 nc->monitor.enabled = true;
261 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
262 spin_unlock_irqrestore(&nc->lock, flags);
264 mod_timer(&nc->monitor.timer, jiffies + HZ);
/* Disable the channel monitor and synchronously cancel its timer.
 * No-op if monitoring is not currently enabled. del_timer_sync() is
 * called outside nc->lock, since the timer callback takes the same lock.
 */
267 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
271 spin_lock_irqsave(&nc->lock, flags);
272 if (!nc->monitor.enabled) {
273 spin_unlock_irqrestore(&nc->lock, flags);
276 nc->monitor.enabled = false;
277 spin_unlock_irqrestore(&nc->lock, flags);
279 del_timer_sync(&nc->monitor.timer);
/* Look up a channel by id within package @np; the id comparison and
 * return statements are elided in this view.
 */
282 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
285 struct ncsi_channel *nc;
287 NCSI_FOR_EACH_CHANNEL(np, nc) {
/* Allocate and register a new channel with the given @id under package
 * @np. GFP_ATOMIC is used because callers may hold spinlocks. If a
 * channel with the same id already exists, the duplicate is discarded
 * (cleanup path elided in this view).
 */
295 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
297 struct ncsi_channel *nc, *tmp;
301 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
307 nc->state = NCSI_CHANNEL_INACTIVE;
308 nc->monitor.enabled = false;
/* The monitor timer fires ncsi_channel_monitor() with this channel. */
309 setup_timer(&nc->monitor.timer,
310 ncsi_channel_monitor, (unsigned long)nc);
311 spin_lock_init(&nc->lock);
312 INIT_LIST_HEAD(&nc->link);
/* Pre-label capability and mode slots with their own indices. */
313 for (index = 0; index < NCSI_CAP_MAX; index++)
314 nc->caps[index].index = index;
315 for (index = 0; index < NCSI_MODE_MAX; index++)
316 nc->modes[index].index = index;
/* Insert under the package lock, rechecking for a racing duplicate. */
318 spin_lock_irqsave(&np->lock, flags);
319 tmp = ncsi_find_channel(np, id);
321 spin_unlock_irqrestore(&np->lock, flags);
326 list_add_tail_rcu(&nc->node, &np->channels);
328 spin_unlock_irqrestore(&np->lock, flags);
/* Tear down a channel: free its filter tables, mark it inactive, stop
 * its monitor and unlink it from the parent package. The final kfree of
 * the channel itself is elided in this view.
 */
333 static void ncsi_remove_channel(struct ncsi_channel *nc)
335 struct ncsi_package *np = nc->package;
336 struct ncsi_channel_filter *ncf;
340 /* Release filters */
341 spin_lock_irqsave(&nc->lock, flags);
342 for (i = 0; i < NCSI_FILTER_MAX; i++) {
343 ncf = nc->filters[i];
347 nc->filters[i] = NULL;
351 nc->state = NCSI_CHANNEL_INACTIVE;
352 spin_unlock_irqrestore(&nc->lock, flags);
353 ncsi_stop_channel_monitor(nc);
355 /* Remove and free channel */
356 spin_lock_irqsave(&np->lock, flags);
357 list_del_rcu(&nc->node);
359 spin_unlock_irqrestore(&np->lock, flags);
/* Look up a package by id on device @ndp; the id comparison and return
 * statements are elided in this view.
 */
364 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
367 struct ncsi_package *np;
369 NCSI_FOR_EACH_PACKAGE(ndp, np) {
/* Allocate and register a new package on device @ndp. Mirrors
 * ncsi_add_channel(): GFP_ATOMIC allocation, then insertion under
 * ndp->lock with a recheck for a racing duplicate (duplicate cleanup
 * elided in this view).
 */
377 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
380 struct ncsi_package *np, *tmp;
383 np = kzalloc(sizeof(*np), GFP_ATOMIC);
389 spin_lock_init(&np->lock);
390 INIT_LIST_HEAD(&np->channels);
392 spin_lock_irqsave(&ndp->lock, flags);
393 tmp = ncsi_find_package(ndp, id);
395 spin_unlock_irqrestore(&ndp->lock, flags);
400 list_add_tail_rcu(&np->node, &ndp->packages);
402 spin_unlock_irqrestore(&ndp->lock, flags);
/* Remove package @np: destroy all of its channels first, then unlink it
 * from the device under ndp->lock. The final kfree of the package is
 * elided in this view.
 */
407 void ncsi_remove_package(struct ncsi_package *np)
409 struct ncsi_dev_priv *ndp = np->ndp;
410 struct ncsi_channel *nc, *tmp;
413 /* Release all child channels */
414 list_for_each_entry_safe(nc, tmp, &np->channels, node)
415 ncsi_remove_channel(nc);
417 /* Remove and free package */
418 spin_lock_irqsave(&ndp->lock, flags);
419 list_del_rcu(&np->node);
421 spin_unlock_irqrestore(&ndp->lock, flags);
/* Decode a combined package/channel @id and return the corresponding
 * objects through @np/@nc (output assignments elided in this view).
 * The channel lookup only happens if the package exists.
 */
426 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
428 struct ncsi_package **np,
429 struct ncsi_channel **nc)
431 struct ncsi_package *p;
432 struct ncsi_channel *c;
434 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
435 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
443 /* For two consecutive NCSI commands, the packet IDs shouldn't
444 * be same. Otherwise, the bogus response might be replied. So
445 * the available IDs are allocated in round-robin fashion.
 *
 * Two passes under ndp->lock: first from the saved cursor up to the
 * table size, then wrapping from NCSI_REQ_START_IDX back to the cursor.
 * Returns the claimed request, or NULL if every slot is in use
 * (return elided in this view).
 */
447 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
448 unsigned int req_flags)
450 struct ncsi_request *nr = NULL;
451 int i, limit = ARRAY_SIZE(ndp->requests);
454 /* Check if there is one available request until the ceiling */
455 spin_lock_irqsave(&ndp->lock, flags);
456 for (i = ndp->request_id; i < limit; i++) {
457 if (ndp->requests[i].used)
460 nr = &ndp->requests[i];
462 nr->flags = req_flags;
/* Cursor advances past the slot just claimed. */
463 ndp->request_id = i + 1;
467 /* Fail back to check from the starting cursor */
468 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
469 if (ndp->requests[i].used)
472 nr = &ndp->requests[i];
474 nr->flags = req_flags;
475 ndp->request_id = i + 1;
480 spin_unlock_irqrestore(&ndp->lock, flags);
/* Release a request slot: cancel its timeout timer, detach its command
 * and response skbs under ndp->lock, and kick the workqueue when the
 * last pending event-driven request completes.
 *
 * NOTE(review): pending_req_num is decremented after ndp->lock is
 * dropped — confirm whether another synchronization mechanism protects
 * it, or whether the decrement should move inside the locked region.
 */
484 void ncsi_free_request(struct ncsi_request *nr)
486 struct ncsi_dev_priv *ndp = nr->ndp;
487 struct sk_buff *cmd, *rsp;
493 del_timer_sync(&nr->timer);
496 spin_lock_irqsave(&ndp->lock, flags);
502 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
503 spin_unlock_irqrestore(&ndp->lock, flags);
/* Last outstanding event-driven request: resume the state machine. */
505 if (driven && cmd && --ndp->pending_req_num == 0)
506 schedule_work(&ndp->work);
508 /* Release command and response */
/* Find the NCSI device bound to net_device @dev by scanning the global
 * device list (match/return statements elided in this view).
 */
513 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
515 struct ncsi_dev_priv *ndp;
517 NCSI_FOR_EACH_DEV(ndp) {
518 if (ndp->ndev.dev == dev)
/* Timer callback for a request that received no response in time.
 * If a response has meanwhile arrived (or there is no command in
 * flight), the response handler owns the release and we back off;
 * otherwise the request is freed here.
 */
525 static void ncsi_request_timeout(unsigned long data)
527 struct ncsi_request *nr = (struct ncsi_request *)data;
528 struct ncsi_dev_priv *ndp = nr->ndp;
531 /* If the request already had associated response,
532 * let the response handler to release it.
534 spin_lock_irqsave(&ndp->lock, flags);
536 if (nr->rsp || !nr->cmd) {
537 spin_unlock_irqrestore(&ndp->lock, flags);
540 spin_unlock_irqrestore(&ndp->lock, flags);
542 /* Release the request */
543 ncsi_free_request(nr);
/* Suspend state machine for the active channel. Driven by nd->state and
 * re-entered from the workqueue as each command's response arrives:
 *   suspend -> select package (SP) -> [optional GLS sweep] ->
 *   disable channel net TX (DCNT) -> disable channel (DC) ->
 *   deselect package (DP) -> done (mark channel inactive, move on).
 * All commands are flagged event-driven so completion reschedules the
 * work item. Error paths and the switch statement's opening are elided
 * in this view.
 */
546 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
548 struct ncsi_dev *nd = &ndp->ndev;
549 struct ncsi_package *np = ndp->active_package;
550 struct ncsi_channel *nc = ndp->active_channel;
551 struct ncsi_cmd_arg nca;
556 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
558 case ncsi_dev_state_suspend:
559 nd->state = ncsi_dev_state_suspend_select;
561 case ncsi_dev_state_suspend_select:
562 ndp->pending_req_num = 1;
/* Select the package; channel field is reserved for SP. */
564 nca.type = NCSI_PKT_CMD_SP;
565 nca.package = np->id;
566 nca.channel = NCSI_RESERVED_CHANNEL;
567 if (ndp->flags & NCSI_DEV_HWA)
572 /* To retrieve the last link states of channels in current
573 * package when current active channel needs fail over to
574 * another one. It means we will possibly select another
575 * channel as next active one. The link states of channels
576 * are most important factor of the selection. So we need
577 * accurate link states. Unfortunately, the link states on
578 * inactive channels can't be updated with LSC AEN in time.
580 if (ndp->flags & NCSI_DEV_RESHUFFLE)
581 nd->state = ncsi_dev_state_suspend_gls;
583 nd->state = ncsi_dev_state_suspend_dcnt;
584 ret = ncsi_xmit_cmd(&nca);
589 case ncsi_dev_state_suspend_gls:
/* One GLS per channel in the package; all must complete. */
590 ndp->pending_req_num = np->channel_num;
592 nca.type = NCSI_PKT_CMD_GLS;
593 nca.package = np->id;
595 nd->state = ncsi_dev_state_suspend_dcnt;
596 NCSI_FOR_EACH_CHANNEL(np, nc) {
597 nca.channel = nc->id;
598 ret = ncsi_xmit_cmd(&nca);
604 case ncsi_dev_state_suspend_dcnt:
605 ndp->pending_req_num = 1;
607 nca.type = NCSI_PKT_CMD_DCNT;
608 nca.package = np->id;
609 nca.channel = nc->id;
611 nd->state = ncsi_dev_state_suspend_dc;
612 ret = ncsi_xmit_cmd(&nca);
617 case ncsi_dev_state_suspend_dc:
618 ndp->pending_req_num = 1;
620 nca.type = NCSI_PKT_CMD_DC;
621 nca.package = np->id;
622 nca.channel = nc->id;
625 nd->state = ncsi_dev_state_suspend_deselect;
626 ret = ncsi_xmit_cmd(&nca);
631 case ncsi_dev_state_suspend_deselect:
632 ndp->pending_req_num = 1;
634 nca.type = NCSI_PKT_CMD_DP;
635 nca.package = np->id;
636 nca.channel = NCSI_RESERVED_CHANNEL;
638 nd->state = ncsi_dev_state_suspend_done;
639 ret = ncsi_xmit_cmd(&nca);
644 case ncsi_dev_state_suspend_done:
645 spin_lock_irqsave(&nc->lock, flags);
646 nc->state = NCSI_CHANNEL_INACTIVE;
647 spin_unlock_irqrestore(&nc->lock, flags);
648 ncsi_process_next_channel(ndp);
652 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
/* Error path: fall back to functional state. */
658 nd->state = ncsi_dev_state_functional;
661 /* Check the VLAN filter bitmap for a set filter, and construct a
662 * "Set VLAN Filter - Disable" packet if found.
 *
 * Returns a value indicating whether a command was built (return
 * statements elided in this view). If the entry's data cannot be read,
 * the disable command is still issued with a zero VLAN id so the
 * hardware slot is freed regardless.
 */
664 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
665 struct ncsi_cmd_arg *nca)
671 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
673 /* Filter table empty */
677 data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
679 netdev_err(ndp->ndev.dev,
680 "ncsi: failed to retrieve filter %d\n", index);
681 /* Set the VLAN id to 0 - this will still disable the entry in
682 * the filter table, but we won't know what it was.
689 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
690 "ncsi: removed vlan tag %u at index %d\n",
692 ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
694 nca->type = NCSI_PKT_CMD_SVF;
696 /* HW filter index starts at 1 */
697 nca->bytes[6] = index + 1;
/* byte 7 = 0: disable this filter entry. */
698 nca->bytes[7] = 0x00;
702 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
 * packet for it. Scans the registered vlan_vids list for the first id
 * not yet present in the channel's VLAN filter table; if all ids are
 * already filtered there is nothing to do (return elided in this view).
 */
705 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
706 struct ncsi_cmd_arg *nca)
708 struct vlan_vid *vlan = NULL;
711 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
712 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
715 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
716 "ncsi: new vlan id to set: %u\n",
720 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
721 "vid %u already at filter pos %d\n",
/* Either the list was empty or the last candidate is already set. */
725 if (!vlan || index >= 0) {
726 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
727 "no vlan ids left to set\n");
/* Record the id in the software filter table first. */
731 index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
733 netdev_err(ndp->ndev.dev,
734 "Failed to add new VLAN tag, error %d\n", index);
738 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
739 "ncsi: set vid %u in packet, index %u\n",
740 vlan->vid, index + 1);
741 nca->type = NCSI_PKT_CMD_SVF;
742 nca->words[1] = vlan->vid;
743 /* HW filter index starts at 1 */
744 nca->bytes[6] = index + 1;
/* byte 7 = 1: enable this filter entry. */
745 nca->bytes[7] = 0x01;
/* Configuration state machine for the active channel, re-entered from
 * the workqueue as responses arrive. Sequence: select package (SP) ->
 * clear initial state (CIS) -> clear stale VLAN filters (SVF disable) ->
 * set known VLAN tags (SVF enable) -> enable/disable VLAN mode (EV/DV) ->
 * set MAC address (SMA) -> enable broadcast filter (EBF) ->
 * [IPv6: enable global multicast (EGMF)] -> enable counters (ECNT) ->
 * enable channel (EC) -> enable AEN (AE, if supported) ->
 * get link status (GLS) -> done. The switch opening, some error paths
 * and the final error handler are elided in this view.
 */
750 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
752 struct ncsi_dev *nd = &ndp->ndev;
753 struct net_device *dev = nd->dev;
754 struct ncsi_package *np = ndp->active_package;
755 struct ncsi_channel *nc = ndp->active_channel;
756 struct ncsi_channel *hot_nc = NULL;
757 struct ncsi_cmd_arg nca;
763 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
765 case ncsi_dev_state_config:
766 case ncsi_dev_state_config_sp:
767 ndp->pending_req_num = 1;
769 /* Select the specific package */
770 nca.type = NCSI_PKT_CMD_SP;
771 if (ndp->flags & NCSI_DEV_HWA)
775 nca.package = np->id;
776 nca.channel = NCSI_RESERVED_CHANNEL;
777 ret = ncsi_xmit_cmd(&nca);
781 nd->state = ncsi_dev_state_config_cis;
783 case ncsi_dev_state_config_cis:
784 ndp->pending_req_num = 1;
786 /* Clear initial state */
787 nca.type = NCSI_PKT_CMD_CIS;
788 nca.package = np->id;
789 nca.channel = nc->id;
790 ret = ncsi_xmit_cmd(&nca);
794 nd->state = ncsi_dev_state_config_clear_vids;
/* The following states share one dispatch block: each builds a single
 * command for this channel, then the tail transmits it.
 */
796 case ncsi_dev_state_config_clear_vids:
797 case ncsi_dev_state_config_svf:
798 case ncsi_dev_state_config_ev:
799 case ncsi_dev_state_config_sma:
800 case ncsi_dev_state_config_ebf:
801 #if IS_ENABLED(CONFIG_IPV6)
802 case ncsi_dev_state_config_egmf:
804 case ncsi_dev_state_config_ecnt:
805 case ncsi_dev_state_config_ec:
806 case ncsi_dev_state_config_ae:
807 case ncsi_dev_state_config_gls:
808 ndp->pending_req_num = 1;
810 nca.package = np->id;
811 nca.channel = nc->id;
813 /* Clear any active filters on the channel before setting */
814 if (nd->state == ncsi_dev_state_config_clear_vids) {
815 ret = clear_one_vid(ndp, nc, &nca);
/* Nothing left to clear: skip straight to setting VLANs. */
817 nd->state = ncsi_dev_state_config_svf;
818 schedule_work(&ndp->work);
/* Loop in this state until all stale filters are cleared. */
822 nd->state = ncsi_dev_state_config_clear_vids;
823 /* Add known VLAN tags to the filter */
824 } else if (nd->state == ncsi_dev_state_config_svf) {
825 ret = set_one_vid(ndp, nc, &nca);
827 nd->state = ncsi_dev_state_config_ev;
828 schedule_work(&ndp->work);
/* Loop in this state until all registered vids are set. */
832 nd->state = ncsi_dev_state_config_svf;
833 /* Enable/Disable the VLAN filter */
834 } else if (nd->state == ncsi_dev_state_config_ev) {
835 if (list_empty(&ndp->vlan_vids)) {
836 nca.type = NCSI_PKT_CMD_DV;
838 nca.type = NCSI_PKT_CMD_EV;
839 nca.bytes[3] = NCSI_CAP_VLAN_NO;
841 nd->state = ncsi_dev_state_config_sma;
842 } else if (nd->state == ncsi_dev_state_config_sma) {
843 /* Use first entry in unicast filter table. Note that
844 * the MAC filter table starts from entry 1 instead of
847 nca.type = NCSI_PKT_CMD_SMA;
848 for (index = 0; index < 6; index++)
849 nca.bytes[index] = dev->dev_addr[index];
852 nd->state = ncsi_dev_state_config_ebf;
853 } else if (nd->state == ncsi_dev_state_config_ebf) {
854 nca.type = NCSI_PKT_CMD_EBF;
855 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
856 nd->state = ncsi_dev_state_config_ecnt;
857 #if IS_ENABLED(CONFIG_IPV6)
/* Detour through EGMF only when IPv6 addresses exist and the
 * channel advertises multicast capability.
 */
858 if (ndp->inet6_addr_num > 0 &&
859 (nc->caps[NCSI_CAP_GENERIC].cap &
860 NCSI_CAP_GENERIC_MC))
861 nd->state = ncsi_dev_state_config_egmf;
863 nd->state = ncsi_dev_state_config_ecnt;
864 } else if (nd->state == ncsi_dev_state_config_egmf) {
865 nca.type = NCSI_PKT_CMD_EGMF;
866 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
867 nd->state = ncsi_dev_state_config_ecnt;
868 #endif /* CONFIG_IPV6 */
869 } else if (nd->state == ncsi_dev_state_config_ecnt) {
870 nca.type = NCSI_PKT_CMD_ECNT;
871 nd->state = ncsi_dev_state_config_ec;
872 } else if (nd->state == ncsi_dev_state_config_ec) {
873 /* Enable AEN if it's supported */
874 nca.type = NCSI_PKT_CMD_EC;
875 nd->state = ncsi_dev_state_config_ae;
876 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
877 nd->state = ncsi_dev_state_config_gls;
878 } else if (nd->state == ncsi_dev_state_config_ae) {
879 nca.type = NCSI_PKT_CMD_AE;
881 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
882 nd->state = ncsi_dev_state_config_gls;
883 } else if (nd->state == ncsi_dev_state_config_gls) {
884 nca.type = NCSI_PKT_CMD_GLS;
885 nd->state = ncsi_dev_state_config_done;
888 ret = ncsi_xmit_cmd(&nca);
892 case ncsi_dev_state_config_done:
893 spin_lock_irqsave(&nc->lock, flags);
894 if (nc->reconfigure_needed) {
895 /* This channel's configuration has been updated
896 * part-way during the config state - start the
897 * channel configuration over
899 nc->reconfigure_needed = false;
900 nc->state = NCSI_CHANNEL_INACTIVE;
901 spin_unlock_irqrestore(&nc->lock, flags);
903 spin_lock_irqsave(&ndp->lock, flags);
904 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
905 spin_unlock_irqrestore(&ndp->lock, flags);
907 netdev_printk(KERN_DEBUG, dev,
908 "Dirty NCSI channel state reset\n");
909 ncsi_process_next_channel(ndp);
/* Link up => channel becomes active (and, per elided lines,
 * presumably the new hot channel); otherwise stays inactive.
 */
913 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
915 nc->state = NCSI_CHANNEL_ACTIVE;
918 nc->state = NCSI_CHANNEL_INACTIVE;
920 spin_unlock_irqrestore(&nc->lock, flags);
922 /* Update the hot channel */
923 spin_lock_irqsave(&ndp->lock, flags);
924 ndp->hot_channel = hot_nc;
925 spin_unlock_irqrestore(&ndp->lock, flags);
927 ncsi_start_channel_monitor(nc);
928 ncsi_process_next_channel(ndp);
931 netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
/* Error path: force a link-down report. */
938 ncsi_report_link(ndp, true);
/* Pick the next channel to configure: scan all inactive, unqueued
 * channels preferring one with link up (selection/bookkeeping of
 * 'found' vs the cached hot channel is partly elided in this view).
 * The chosen channel is queued and processing is kicked off; with no
 * candidate at all, link-down is reported.
 */
941 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
943 struct ncsi_package *np;
944 struct ncsi_channel *nc, *found, *hot_nc;
945 struct ncsi_channel_mode *ncm;
948 spin_lock_irqsave(&ndp->lock, flags);
949 hot_nc = ndp->hot_channel;
950 spin_unlock_irqrestore(&ndp->lock, flags);
952 /* The search is done once an inactive channel with up
956 NCSI_FOR_EACH_PACKAGE(ndp, np) {
957 NCSI_FOR_EACH_CHANNEL(np, nc) {
958 spin_lock_irqsave(&nc->lock, flags);
/* Skip channels already queued or not inactive. */
960 if (!list_empty(&nc->link) ||
961 nc->state != NCSI_CHANNEL_INACTIVE) {
962 spin_unlock_irqrestore(&nc->lock, flags);
/* Link up (bit 0 of status word): best candidate, stop here. */
972 ncm = &nc->modes[NCSI_MODE_LINK];
973 if (ncm->data[2] & 0x1) {
974 spin_unlock_irqrestore(&nc->lock, flags);
979 spin_unlock_irqrestore(&nc->lock, flags);
/* No usable channel found anywhere: report link down. */
984 ncsi_report_link(ndp, true);
989 spin_lock_irqsave(&ndp->lock, flags);
990 list_add_tail_rcu(&found->link, &ndp->channel_queue);
991 spin_unlock_irqrestore(&ndp->lock, flags);
993 return ncsi_process_next_channel(ndp);
/* Decide whether hardware arbitration (HWA) can be used: every channel
 * on every package must advertise HWA support with the supported
 * arbitration scheme, otherwise the NCSI_DEV_HWA flag is cleared.
 * Return statements are elided in this view.
 */
996 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
998 struct ncsi_package *np;
999 struct ncsi_channel *nc;
1002 /* The hardware arbitration is disabled if any one channel
1003 * doesn't support explicitly.
1005 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1006 NCSI_FOR_EACH_CHANNEL(np, nc) {
1007 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1008 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1009 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1010 NCSI_CAP_GENERIC_HWA_SUPPORT) {
1011 ndp->flags &= ~NCSI_DEV_HWA;
/* All channels agreed: enable HWA. */
1017 ndp->flags |= NCSI_DEV_HWA;
/* With hardware arbitration, all channels can be active at once: stop
 * each channel's monitor and queue every channel for configuration,
 * then start processing. Reports link down if no channels exist.
 */
1021 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1023 struct ncsi_package *np;
1024 struct ncsi_channel *nc;
1025 unsigned long flags;
1027 /* Move all available channels to processing queue */
1028 spin_lock_irqsave(&ndp->lock, flags);
1029 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1030 NCSI_FOR_EACH_CHANNEL(np, nc) {
/* Channels are expected to be idle and unqueued at this point. */
1031 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1032 !list_empty(&nc->link));
1033 ncsi_stop_channel_monitor(nc);
1034 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1037 spin_unlock_irqrestore(&ndp->lock, flags);
1039 /* We can have no channels in extremely case */
1040 if (list_empty(&ndp->channel_queue)) {
1041 ncsi_report_link(ndp, false);
1045 return ncsi_process_next_channel(ndp);
/* Probe/enumeration state machine, run once at interface start:
 *   deselect all 8 possible packages (DP) -> select each package (SP,
 *   plus a second DP sweep) -> per discovered package: select it,
 *   clear initial state on every possible channel (CIS), then query
 *   version (GVI), capabilities (GC) and link status (GLS) for each
 *   channel, deselect the package (DP), and move to the next package.
 * When all packages are enumerated, HWA is evaluated and either all
 * channels are enabled or a single active channel is chosen. Error
 * paths and the switch fall-through details are partly elided in this
 * view.
 */
1048 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1050 struct ncsi_dev *nd = &ndp->ndev;
1051 struct ncsi_package *np;
1052 struct ncsi_channel *nc;
1053 struct ncsi_cmd_arg nca;
1054 unsigned char index;
1058 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1059 switch (nd->state) {
1060 case ncsi_dev_state_probe:
1061 nd->state = ncsi_dev_state_probe_deselect;
1063 case ncsi_dev_state_probe_deselect:
/* One DP per possible package id (0..7). */
1064 ndp->pending_req_num = 8;
1066 /* Deselect all possible packages */
1067 nca.type = NCSI_PKT_CMD_DP;
1068 nca.channel = NCSI_RESERVED_CHANNEL;
1069 for (index = 0; index < 8; index++) {
1070 nca.package = index;
1071 ret = ncsi_xmit_cmd(&nca);
1076 nd->state = ncsi_dev_state_probe_package;
1078 case ncsi_dev_state_probe_package:
/* 8 SP + 8 DP commands are issued below. */
1079 ndp->pending_req_num = 16;
1081 /* Select all possible packages */
1082 nca.type = NCSI_PKT_CMD_SP;
1084 nca.channel = NCSI_RESERVED_CHANNEL;
1085 for (index = 0; index < 8; index++) {
1086 nca.package = index;
1087 ret = ncsi_xmit_cmd(&nca);
1092 /* Disable all possible packages */
1093 nca.type = NCSI_PKT_CMD_DP;
1094 for (index = 0; index < 8; index++) {
1095 nca.package = index;
1096 ret = ncsi_xmit_cmd(&nca);
1101 nd->state = ncsi_dev_state_probe_channel;
1103 case ncsi_dev_state_probe_channel:
/* Walk the discovered packages one at a time. */
1104 if (!ndp->active_package)
1105 ndp->active_package = list_first_or_null_rcu(
1106 &ndp->packages, struct ncsi_package, node);
1107 else if (list_is_last(&ndp->active_package->node,
1109 ndp->active_package = NULL;
1111 ndp->active_package = list_next_entry(
1112 ndp->active_package, node);
1114 /* All available packages and channels are enumerated. The
1115 * enumeration happens for once when the NCSI interface is
1116 * started. So we need continue to start the interface after
1119 * We have to choose an active channel before configuring it.
1120 * Note that we possibly don't have active channel in extreme
1123 if (!ndp->active_package) {
1124 ndp->flags |= NCSI_DEV_PROBED;
1125 if (ncsi_check_hwa(ndp))
1126 ncsi_enable_hwa(ndp);
1128 ncsi_choose_active_channel(ndp);
1132 /* Select the active package */
1133 ndp->pending_req_num = 1;
1134 nca.type = NCSI_PKT_CMD_SP;
1136 nca.package = ndp->active_package->id;
1137 nca.channel = NCSI_RESERVED_CHANNEL;
1138 ret = ncsi_xmit_cmd(&nca);
1142 nd->state = ncsi_dev_state_probe_cis;
1144 case ncsi_dev_state_probe_cis:
1145 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1147 /* Clear initial state */
1148 nca.type = NCSI_PKT_CMD_CIS;
1149 nca.package = ndp->active_package->id;
1150 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1151 nca.channel = index;
1152 ret = ncsi_xmit_cmd(&nca);
1157 nd->state = ncsi_dev_state_probe_gvi;
1159 case ncsi_dev_state_probe_gvi:
1160 case ncsi_dev_state_probe_gc:
1161 case ncsi_dev_state_probe_gls:
1162 np = ndp->active_package;
1163 ndp->pending_req_num = np->channel_num;
1165 /* Retrieve version, capability or link status */
1166 if (nd->state == ncsi_dev_state_probe_gvi)
1167 nca.type = NCSI_PKT_CMD_GVI;
1168 else if (nd->state == ncsi_dev_state_probe_gc)
1169 nca.type = NCSI_PKT_CMD_GC;
1171 nca.type = NCSI_PKT_CMD_GLS;
1173 nca.package = np->id;
1174 NCSI_FOR_EACH_CHANNEL(np, nc) {
1175 nca.channel = nc->id;
1176 ret = ncsi_xmit_cmd(&nca);
/* Advance GVI -> GC -> GLS -> DP. */
1181 if (nd->state == ncsi_dev_state_probe_gvi)
1182 nd->state = ncsi_dev_state_probe_gc;
1183 else if (nd->state == ncsi_dev_state_probe_gc)
1184 nd->state = ncsi_dev_state_probe_gls;
1186 nd->state = ncsi_dev_state_probe_dp;
1188 case ncsi_dev_state_probe_dp:
1189 ndp->pending_req_num = 1;
1191 /* Deselect the active package */
1192 nca.type = NCSI_PKT_CMD_DP;
1193 nca.package = ndp->active_package->id;
1194 nca.channel = NCSI_RESERVED_CHANNEL;
1195 ret = ncsi_xmit_cmd(&nca);
1199 /* Scan channels in next package */
1200 nd->state = ncsi_dev_state_probe_channel;
1203 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
/* Error path: force a link-down report. */
1209 ncsi_report_link(ndp, true);
/* Workqueue entry point: dispatch to the state machine (probe, suspend
 * or configure) selected by the major bits of the device state.
 */
1212 static void ncsi_dev_work(struct work_struct *work)
1214 struct ncsi_dev_priv *ndp = container_of(work,
1215 struct ncsi_dev_priv, work);
1216 struct ncsi_dev *nd = &ndp->ndev;
1218 switch (nd->state & ncsi_dev_state_major) {
1219 case ncsi_dev_state_probe:
1220 ncsi_probe_channel(ndp);
1222 case ncsi_dev_state_suspend:
1223 ncsi_suspend_channel(ndp);
1225 case ncsi_dev_state_config:
1226 ncsi_configure_channel(ndp);
1229 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
/* Dequeue the next channel from ndp->channel_queue and run the
 * appropriate state machine on it: inactive channels are configured,
 * active ones are suspended. While being processed the channel is
 * marked INVISIBLE. With an empty queue, either a reshuffle picks a
 * new active channel or link-down is reported.
 */
1234 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1236 struct ncsi_channel *nc;
1238 unsigned long flags;
1240 spin_lock_irqsave(&ndp->lock, flags);
1241 nc = list_first_or_null_rcu(&ndp->channel_queue,
1242 struct ncsi_channel, link);
/* Queue empty: handled below after dropping the lock. */
1244 spin_unlock_irqrestore(&ndp->lock, flags);
1248 list_del_init(&nc->link);
1249 spin_unlock_irqrestore(&ndp->lock, flags);
/* Hide the channel while its state machine runs. */
1251 spin_lock_irqsave(&nc->lock, flags);
1252 old_state = nc->state;
1253 nc->state = NCSI_CHANNEL_INVISIBLE;
1254 spin_unlock_irqrestore(&nc->lock, flags);
1256 ndp->active_channel = nc;
1257 ndp->active_package = nc->package;
1259 switch (old_state) {
1260 case NCSI_CHANNEL_INACTIVE:
1261 ndp->ndev.state = ncsi_dev_state_config;
1262 ncsi_configure_channel(ndp);
1264 case NCSI_CHANNEL_ACTIVE:
1265 ndp->ndev.state = ncsi_dev_state_suspend;
1266 ncsi_suspend_channel(ndp);
1269 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1270 old_state, nc->package->id, nc->id);
1271 ncsi_report_link(ndp, false);
/* Empty-queue path: no channel is being processed. */
1278 ndp->active_channel = NULL;
1279 ndp->active_package = NULL;
1280 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1281 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1282 return ncsi_choose_active_channel(ndp);
1285 ncsi_report_link(ndp, false);
1289 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 address notifier: keep the global multicast filter (EGMF/DGMF)
 * in sync with the number of non-link-local, non-loopback IPv6
 * addresses on the underlying net_device. The filter is only toggled
 * on the first address added / last address removed, and only when an
 * active package/channel exists and advertises multicast capability.
 */
1290 static int ncsi_inet6addr_event(struct notifier_block *this,
1291 unsigned long event, void *data)
1293 struct inet6_ifaddr *ifa = data;
1294 struct net_device *dev = ifa->idev->dev;
1295 struct ncsi_dev *nd = ncsi_find_dev(dev);
1296 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1297 struct ncsi_package *np;
1298 struct ncsi_channel *nc;
1299 struct ncsi_cmd_arg nca;
/* Ignore non-NCSI devices, link-local and loopback addresses. */
1303 if (!ndp || (ipv6_addr_type(&ifa->addr) &
1304 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
/* 'action' is true only on the 0->1 / 1->0 address-count edges. */
1309 action = (++ndp->inet6_addr_num) == 1;
1310 nca.type = NCSI_PKT_CMD_EGMF;
1313 action = (--ndp->inet6_addr_num == 0);
1314 nca.type = NCSI_PKT_CMD_DGMF;
1320 /* We might not have active channel or packages. The IPv6
1321 * required multicast will be enabled when active channel
1322 * or packages are chosen.
1324 np = ndp->active_package;
1325 nc = ndp->active_channel;
1326 if (!action || !np || !nc)
1329 /* We needn't enable or disable it if the function isn't supported */
1330 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1335 nca.package = np->id;
1336 nca.channel = nc->id;
1337 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1338 ret = ncsi_xmit_cmd(&nca);
1340 netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1341 (event == NETDEV_UP) ? "enable" : "disable", ret);
/* Registered/unregistered alongside the first/last NCSI device. */
1348 static struct notifier_block ncsi_inet6addr_notifier = {
1349 .notifier_call = ncsi_inet6addr_event,
1351 #endif /* CONFIG_IPV6 */
/* Force reconfiguration of channels after a filter change (e.g. VLAN
 * add/remove). Busy channels — those currently being configured or
 * already queued — are only marked reconfigure_needed; idle non-active
 * channels are marked inactive and queued directly. Returns a count
 * whose accumulation is elided in this view.
 */
1353 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1355 struct ncsi_dev *nd = &ndp->ndev;
1356 struct ncsi_channel *nc;
1357 struct ncsi_package *np;
1358 unsigned long flags;
1361 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1362 NCSI_FOR_EACH_CHANNEL(np, nc) {
1363 spin_lock_irqsave(&nc->lock, flags);
1365 /* Channels may be busy, mark dirty instead of
1367 * a) not ACTIVE (configured)
1368 * b) in the channel_queue (to be configured)
1369 * c) it's ndev is in the config state
1371 if (nc->state != NCSI_CHANNEL_ACTIVE) {
1372 if ((ndp->ndev.state & 0xff00) ==
1373 ncsi_dev_state_config ||
1374 !list_empty(&nc->link)) {
1375 netdev_printk(KERN_DEBUG, nd->dev,
1376 "ncsi: channel %p marked dirty\n",
/* Config-done handler will restart this channel. */
1378 nc->reconfigure_needed = true;
1380 spin_unlock_irqrestore(&nc->lock, flags);
1384 spin_unlock_irqrestore(&nc->lock, flags);
/* Idle channel: recycle it through the queue immediately. */
1386 ncsi_stop_channel_monitor(nc);
1387 spin_lock_irqsave(&nc->lock, flags);
1388 nc->state = NCSI_CHANNEL_INACTIVE;
1389 spin_unlock_irqrestore(&nc->lock, flags);
1391 spin_lock_irqsave(&ndp->lock, flags);
1392 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1393 spin_unlock_irqrestore(&ndp->lock, flags);
1395 netdev_printk(KERN_DEBUG, nd->dev,
1396 "ncsi: kicked channel %p\n", nc);
/* net_device VLAN-add hook: record @vid in the driver's vlan_vids list
 * and kick the channels so the hardware filters get reprogrammed.
 * Rejects duplicates and refuses to exceed the hot channel's VLAN
 * filter capacity.
 *
 * NOTE(review): ndp->hot_channel is dereferenced without a visible
 * NULL check in this view — confirm a hot channel is guaranteed here.
 */
1404 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1406 struct ncsi_channel_filter *ncf;
1407 struct ncsi_dev_priv *ndp;
1408 unsigned int n_vids = 0;
1409 struct vlan_vid *vlan;
1410 struct ncsi_dev *nd;
1416 nd = ncsi_find_dev(dev);
1418 netdev_warn(dev, "ncsi: No net_device?\n");
1422 ndp = TO_NCSI_DEV_PRIV(nd);
1423 ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
1425 /* Add the VLAN id to our internal list */
1426 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1428 if (vlan->vid == vid) {
1429 netdev_printk(KERN_DEBUG, dev,
1430 "vid %u already registered\n", vid);
/* Capacity check against the hardware filter table size. */
1435 if (n_vids >= ncf->total) {
1437 "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
1438 ncf->total, n_vids);
1442 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1446 vlan->proto = proto;
1448 list_add_rcu(&vlan->list, &ndp->vlan_vids);
1450 netdev_printk(KERN_DEBUG, dev, "Added new vid %u\n", vid);
/* Reprogram filters only if any channel actually needed kicking. */
1452 found = ncsi_kick_channels(ndp) != 0;
1454 return found ? ncsi_process_next_channel(ndp) : 0;
/* net_device VLAN-remove hook: drop @vid from the vlan_vids list
 * (freeing of the entry is elided in this view) and kick the channels
 * so the hardware filter entry gets disabled. Errors if the vid was
 * never registered.
 */
1458 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1460 struct vlan_vid *vlan, *tmp;
1461 struct ncsi_dev_priv *ndp;
1462 struct ncsi_dev *nd;
1468 nd = ncsi_find_dev(dev);
1470 netdev_warn(dev, "ncsi: no net_device?\n");
1474 ndp = TO_NCSI_DEV_PRIV(nd);
1476 /* Remove the VLAN id from our internal list */
1477 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1478 if (vlan->vid == vid) {
1479 netdev_printk(KERN_DEBUG, dev,
1480 "vid %u found, removing\n", vid);
1481 list_del_rcu(&vlan->list);
1487 netdev_err(dev, "ncsi: vid %u wasn't registered!\n", vid);
1491 found = ncsi_kick_channels(ndp) != 0;
1493 return found ? ncsi_process_next_channel(ndp) : 0;
/* Register @dev as an NCSI device. Idempotent: if the device is already
 * registered the existing handle is returned (return elided in this
 * view). Otherwise allocates the private structure, initializes the
 * request table and timers, links the device into the global list
 * (registering the IPv6 notifier for the first device), and installs
 * the NCSI packet-type Rx handler. @handler is invoked on state
 * transitions.
 */
1497 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1498 void (*handler)(struct ncsi_dev *ndev))
1500 struct ncsi_dev_priv *ndp;
1501 struct ncsi_dev *nd;
1502 unsigned long flags;
1505 /* Check if the device has been registered or not */
1506 nd = ncsi_find_dev(dev);
1510 /* Create NCSI device */
1511 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1516 nd->state = ncsi_dev_state_registered;
1518 nd->handler = handler;
1519 ndp->pending_req_num = 0;
1520 INIT_LIST_HEAD(&ndp->channel_queue);
1521 INIT_LIST_HEAD(&ndp->vlan_vids);
1522 INIT_WORK(&ndp->work, ncsi_dev_work);
1524 /* Initialize private NCSI device */
1525 spin_lock_init(&ndp->lock);
1526 INIT_LIST_HEAD(&ndp->packages);
1527 ndp->request_id = NCSI_REQ_START_IDX;
/* Each request slot gets its own timeout timer. */
1528 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1529 ndp->requests[i].id = i;
1530 ndp->requests[i].ndp = ndp;
1531 setup_timer(&ndp->requests[i].timer,
1532 ncsi_request_timeout,
1533 (unsigned long)&ndp->requests[i]);
1536 spin_lock_irqsave(&ncsi_dev_lock, flags);
1537 #if IS_ENABLED(CONFIG_IPV6)
1538 ndp->inet6_addr_num = 0;
/* First NCSI device registers the shared IPv6 address notifier. */
1539 if (list_empty(&ncsi_dev_list))
1540 register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1542 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1543 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1545 /* Register NCSI packet Rx handler */
1546 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1547 ndp->ptype.func = ncsi_rcv_rsp;
1548 ndp->ptype.dev = dev;
1549 dev_add_pack(&ndp->ptype);
/* Start the NCSI device. Only legal from the registered or functional
 * states. On first start, kicks off the probe state machine via the
 * workqueue; on subsequent starts, goes straight to enabling HWA or
 * choosing an active channel.
 */
1555 int ncsi_start_dev(struct ncsi_dev *nd)
1557 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1560 if (nd->state != ncsi_dev_state_registered &&
1561 nd->state != ncsi_dev_state_functional)
1564 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1565 nd->state = ncsi_dev_state_probe;
1566 schedule_work(&ndp->work);
1570 if (ndp->flags & NCSI_DEV_HWA)
1571 ret = ncsi_enable_hwa(ndp);
1573 ret = ncsi_choose_active_channel(ndp);
/* Stop the NCSI device: halt every channel's monitor, force all
 * channels to the inactive state, then report link down. Warns if a
 * channel was still queued or mid-processing (INVISIBLE) — callers are
 * expected to have quiesced the state machines first.
 */
1579 void ncsi_stop_dev(struct ncsi_dev *nd)
1581 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1582 struct ncsi_package *np;
1583 struct ncsi_channel *nc;
1586 unsigned long flags;
1588 /* Stop the channel monitor and reset channel's state */
1589 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1590 NCSI_FOR_EACH_CHANNEL(np, nc) {
1591 ncsi_stop_channel_monitor(nc);
1593 spin_lock_irqsave(&nc->lock, flags);
1594 chained = !list_empty(&nc->link);
1595 old_state = nc->state;
1596 nc->state = NCSI_CHANNEL_INACTIVE;
1597 spin_unlock_irqrestore(&nc->lock, flags);
1599 WARN_ON_ONCE(chained ||
1600 old_state == NCSI_CHANNEL_INVISIBLE);
1604 ncsi_report_link(ndp, true);
/* Unregister the NCSI device: remove the Rx packet handler, destroy all
 * packages (and their channels), and unlink the device from the global
 * list — unregistering the IPv6 notifier when the last device goes.
 * The final kfree of the private structure is elided in this view.
 */
1608 void ncsi_unregister_dev(struct ncsi_dev *nd)
1610 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1611 struct ncsi_package *np, *tmp;
1612 unsigned long flags;
1614 dev_remove_pack(&ndp->ptype);
1616 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1617 ncsi_remove_package(np);
1619 spin_lock_irqsave(&ncsi_dev_lock, flags);
1620 list_del_rcu(&ndp->node);
1621 #if IS_ENABLED(CONFIG_IPV6)
1622 if (list_empty(&ncsi_dev_list))
1623 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1625 spin_unlock_irqrestore(&ncsi_dev_lock, flags);