/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
MODULE_PARM_DESC(msg_level, "Netif message level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");
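/*
 * Example (hypothetical values): loading the driver with larger receive
 * rings and multiple receive queues enabled:
 *
 *	modprobe ehea rq1_entries=16383 rq2_entries=1023 use_mcs=1
 *
 * The entry counts follow the hardware's 2^x - 1 queue sizes; validation
 * of the x = [7..14] range happens in code not shown in this excerpt.
 */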
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
	{
		.compatible = "IBM,lhea",
	},
	{
		.compatible = "IBM,lhea-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
	{
		.compatible = "IBM,lhea",
	},
	{},
};

static struct platform_driver ehea_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
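/*
 * Dump @len bytes at @adr as rows of two 64-bit words, each row tagged
 * with @msg and its offset. Used throughout this file to dump CQEs,
 * SWQEs and hypervisor control blocks when the matching netif_msg_*
 * debug level is enabled.
 */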
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
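/*
 * Rebuild the flat snapshot of every firmware handle (per-port QPs, CQs,
 * EQs and memory regions, plus the per-adapter NEQ and MR) under
 * ehea_fw_handles.lock. The snapshot is presumably consumed by the
 * driver's reboot/crash paths (not part of this excerpt), which must be
 * able to free hypervisor resources without walking live driver state.
 */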
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
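/*
 * Mirror of all broadcast/multicast registrations currently held by the
 * hypervisor, rebuilt under ehea_bcmc_regs.lock. Each port that is up
 * contributes two broadcast entries (untagged + all-VLAN) plus two per
 * multicast address, matching the registration helpers further below.
 */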
static void ehea_update_bcmc_registrations(void)
{
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	unsigned long flags;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static void ehea_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
}
static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}
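/*
 * Post receive WQEs for RQ1. RQ1 entries are the low-latency path: the
 * packet payload arrives inside the CQE itself, so the skbs posted here
 * only back the copy-out. os_skbs accumulates entries that could not be
 * posted (allocation failure or a globally stopped transfer) so a later
 * call can catch up before ringing the doorbell.
 */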
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_consume_skb_any(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		return ret;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);

	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
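/*
 * Each QP uses three receive queues, selected by frame size: RQ1 for
 * small frames whose payload is delivered in the CQE itself, RQ2 for
 * mid-sized frames and RQ3 (EHEA_MAX_PACKET_SIZE) for jumbo frames.
 * ehea_check_cqe() below extracts which RQ a completion belongs to from
 * the CQE type field.
 */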
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x = (skb_index + 1) & (arr_len - 1);	/* prefetch next entry */

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x = (wqe_index + 1) & (arr_len - 1);	/* prefetch next entry */

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
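/*
 * NAPI receive worker: drains RQ1 completions up to @budget, dispatching
 * each CQE to the RQ it came from. RQ1 frames are copied out of the CQE
 * (low-latency path); RQ2/RQ3 frames arrive in pre-posted skbs. Consumed
 * entries are re-posted at the end in one batch per RQ.
 */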
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
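/*
 * SWQE_RESTART_CHECK is a magic wr_id posted by check_sqs() below. When
 * its completion comes back through ehea_proc_cqes(), sq_restart_flag is
 * set, proving the hardware send queue is still in sync with the
 * driver's view; if the completion never arrives the port is reset.
 */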
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}
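/*
 * NAPI poll handler: reaps send completions first (up to the large
 * EHEA_POLL_MAX_CQES quota, since TX reaping is cheap), then receive
 * work against the real @budget. Before completing it re-arms both CQs
 * and polls once more to close the race between "no more work" and the
 * interrupt being re-enabled.
 */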
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       u32 logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			pr_err("Failed setting port speed\n");
			ret = -EIO;
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		netdev_err(NULL, "unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}
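/*
 * The adapter-wide notification event queue (NEQ) reports link and
 * malfunction events asynchronously. The interrupt handler below only
 * kicks this tasklet; the tasklet drains the EQ, feeds each entry to
 * ehea_parse_eqe() and then re-arms the event mask with the hypervisor.
 */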
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}

	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
out:
	return ret;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;
	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;
	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
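/*
 * Set up one port resource set: its event queue, send/receive completion
 * queues, the QP with its three receive queues, skb bookkeeping arrays
 * sized to the queue depths the firmware actually granted, shared memory
 * regions and the NAPI context. The traffic counters are saved and
 * restored because a port reset re-runs this over a live structure.
 */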
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	/* restore the traffic counters saved above */
	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);
	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
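/*
 * SWQE2 layout: up to SWQE2_MAX_IMM bytes of the linear skb data travel
 * as immediate data inside the WQE; anything beyond that, and every page
 * fragment, is described by scatter-gather entries referencing the send
 * memory region (lkey). sg1entry is special-cased because it lives in
 * the immediate descriptor area rather than in the sg_list proper.
 */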
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		else
			netdev_err(dev,
				   "failed disabling IFF_ALLMULTI\n");
	}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}
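/*
 * Note: the hardware apparently exposes a single transport-checksum
 * control bit and a single checksum-field offset, which is why the UDP
 * case above also sets EHEA_SWQE_TCP_CHECKSUM and merely points
 * tcp_offset at the UDP header's check field instead.
 */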
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (skb->data_len == 0)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_consume_skb_any(skb);
}
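/*
 * Transmit entry point. Frames up to SWQE3_MAX_IMM bytes go out as
 * SWQE3 (all data immediate, skb freed right away, completions only
 * signalled every sig_comp_iv frames); larger frames go out as SWQE2
 * with scatter-gather descriptors, the skb parked in sq_skba until the
 * signalled completion returns its ring index via the wr_id bitfields
 * (TYPE/COUNT/REFILL/INDEX).
 */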
static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}
static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}
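/*
 * The VLAN filter is a 4096-bit bitmap stored as an array of u64 words:
 * word index = vid / 64, with the bit taken from the MSB end, i.e. bit
 * (vid & 0x3F) counted down from bit 63. Worked example: vid 70 lands
 * in vlan_filter[1] with mask 0x8000000000000000 >> 6 =
 * 0x0200000000000000. ehea_vlan_rx_kill_vid() below clears the same bit.
 */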
static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}
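/*
 * Walk a freshly created QP through its enable ladder with paired
 * query/modify hcalls: INITIALIZED, then ENABLED+INITIALIZED, then
 * ENABLED+RDY2SND, re-reading the control block after each step. The
 * step number appears in the error messages below.
 */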
static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		pr_err("ehea_create_eq failed (qp_eq)\n");
		return -EINVAL;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	/*
	 * Apparently vestigial: with identical start and end bounds this
	 * loop never executes; it looks left over from a removed
	 * extra-TX-queue configuration that used pr_cfg_small_rx.
	 */
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

	ehea_destroy_eq(port->qp_eq);
	return -EIO;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}
static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}
	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}
	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}
	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}
	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free_irqs;

	port->state = EHEA_PORT_UP;
	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);
out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();
	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	netif_carrier_off(dev);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}
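/*
 * During a port reset or DLPAR memory operation the send queues are
 * drained in two steps: ehea_purge_sq() marks every pending SWQE with
 * EHEA_SWQE_PURGE so the hardware discards rather than transmits them,
 * and ehea_flush_sq() waits (bounded) until the available-WQE count
 * shows the hardware has consumed the ring.
 */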
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}
static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

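/*
 * Counterpart to ehea_stop_qps(): re-create the shared memory regions,
 * patch the posted RWQEs, re-enable each QP through the same
 * query/modify/query sequence, and refill all three receive queues.
 */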
static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

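/*
 * Port reset worker: take the port fully down and back up while
 * holding dlpar_mem_lock so a reset cannot race with a memory
 * add/remove operation.
 */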
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

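/*
 * Called from the memory notifier when LPAR memory changes: stop all
 * active ports and drop the old kernel memory region, then register a
 * new MR and restart the ports. Transmit is gated globally through
 * __EHEA_STOP_XFER while this runs; the bit is set by the caller and
 * cleared here once the old MRs are gone.
 */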
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

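/*
 * .ndo_tx_timeout hook: schedule a port reset unless the carrier is
 * already down or a memory re-registration is in flight.
 */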
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);

static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

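/*
 * Allocate and initialize the net_device and ehea_port for one logical
 * port: sense the port attributes, register the OF port device, set up
 * the netdev ops and feature flags, and register with the network
 * stack.
 */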
static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
		      NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
	dev->features = NETIF_F_SG | NETIF_F_TSO |
		      NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
		      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		      NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	/* MTU range: 68 - 9022 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = EHEA_MAX_PACKET_SIZE;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);
out_free_mc_list:
	kfree(port->mc_list);
out_free_ethdev:
	free_netdev(dev);
out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

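/*
 * Walk the adapter's device-tree children and create one logical port
 * per child node carrying an "ibm,hea-port-no" property.
 */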
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

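/*
 * sysfs store handler for hot-adding a logical port: the port id is
 * parsed from the written string, looked up in the device tree, and a
 * new port is set up. Typical usage (illustrative only; the exact
 * sysfs path depends on the ebus device name) would be something like:
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<lhea-device>/probe_port
 */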
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		of_node_put(eth_dn);
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);

static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

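/*
 * Memory hotplug notifier: keep the driver's busmap in sync with LPAR
 * memory coming online or going offline, and trigger a full
 * memory-region re-registration, all under dlpar_mem_lock.
 */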
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Fall through: re-add canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

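/*
 * kexec/crash shutdown hook: forcibly free all cached firmware
 * resource handles and deregister all cached broadcast/multicast MAC
 * registrations, presumably so a subsequent (kdump) kernel finds the
 * adapter in a clean state.
 */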
static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

static atomic_t ehea_memory_hooks_registered;

/* Register memory hooks on probe of first adapter */
static int ehea_register_memory_hooks(void)
{
	int ret = 0;

	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
		return 0;

	ret = ehea_create_busmap();
	if (ret) {
		pr_info("ehea_create_busmap failed\n");
		goto out;
	}

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret) {
		pr_info("register_reboot_notifier failed\n");
		goto out;
	}

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret) {
		pr_info("register_memory_notifier failed\n");
		goto out_unreg_reboot;
	}

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret) {
		pr_info("crash_shutdown_register failed\n");
		goto out_unreg_mem;
	}

	return 0;

out_unreg_mem:
	unregister_memory_notifier(&ehea_mem_nb);
out_unreg_reboot:
	unregister_reboot_notifier(&ehea_reboot_nb);
out:
	atomic_dec(&ehea_memory_hooks_registered);
	return ret;
}

static void ehea_unregister_memory_hooks(void)
{
	/* Only remove the hooks if we've registered them */
	if (atomic_read(&ehea_memory_hooks_registered) == 0)
		return;

	unregister_reboot_notifier(&ehea_reboot_nb);
	if (crash_shutdown_unregister(ehea_crash_handler))
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
}

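/*
 * ibmebus probe: read the adapter handle from the device tree, sense
 * the adapter attributes, create the notification EQ and its tasklet,
 * expose the probe_port/remove_port sysfs files, and set up all ports.
 */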
static int ehea_probe_adapter(struct platform_device *dev)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	ret = ehea_register_memory_hooks();
	if (ret)
		return ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%pOF'\n", dev->dev.of_node);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	platform_set_drvdata(dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, 0,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);

	ehea_update_firmware_handles();

	return 0;
}

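/*
 * Sanity-check the queue-size module parameters against the limits
 * supported by the hardware; module loading fails if any of the four
 * is out of range.
 */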
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t capabilities_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR_RO(capabilities);

static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out2;
	}

	return ret;

out2:
	ibmebus_unregister_driver(&ehea_driver);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	ehea_unregister_memory_hooks();
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);