/**************************************************************************
 * IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is ready
 * for transmission or that a buffer has been added to receive a packet.
 * Subsequently, sCRQs are used by the server to notify the driver that
 * a packet transmission has been completed or that a packet has been
 * received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, contiguous DMA-mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 **************************************************************************/
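
/*
 * Illustrative sketch (added commentary, not part of the original
 * driver): the "long term mapping" scheme described above amounts to
 * mapping one large coherent DMA region per buffer pool at init time
 * and memcpy'ing each skb into a fixed slot of that region, rather
 * than calling dma_map_single()/dma_unmap_single() per packet. A
 * minimal, hedged sketch follows; the example_ltb type and helpers
 * are hypothetical stand-ins for struct ibmvnic_long_term_buff and
 * the real helpers defined later in this file.
 */
#if 0	/* example only, not compiled */
struct example_ltb {
	void *buff;		/* CPU address of the region */
	dma_addr_t addr;	/* bus address handed to the VNIC server */
	int size;
};

/* Map once at init; the region is reused for the device's lifetime. */
static int example_ltb_alloc(struct device *dev, struct example_ltb *ltb,
			     int size)
{
	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr, GFP_KERNEL);
	return ltb->buff ? 0 : -ENOMEM;
}

/* Per packet: copy the skb into slot i; no map/unmap on the fast path. */
static dma_addr_t example_ltb_fill(struct example_ltb *ltb, int i,
				   int slot_size, const struct sk_buff *skb)
{
	unsigned int offset = i * slot_size;

	memcpy(ltb->buff + offset, skb->data, skb_headlen(skb));
	return ltb->addr + offset;	/* ioba for the sub-CRQ descriptor */
}
#endif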
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
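
/*
 * Added usage note: IBMVNIC_STAT_OFF records the byte offset of a
 * firmware counter inside struct ibmvnic_adapter, and IBMVNIC_GET_STAT
 * reads a u64 at such an offset. A hedged sketch, assuming a valid
 * adapter pointer:
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * This is how the ethtool stats callback walks the ibmvnic_stats[]
 * table below.
 */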
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
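	/*
	 * Added note (based on the PAPR hcall calling convention, not
	 * original text): plpar_hcall() returns up to PLPAR_HCALL_BUFSIZE
	 * output words in retbuf; for H_REG_SUB_CRQ the first two carry
	 * the new sub-CRQ number and its interrupt source, which the
	 * elided remainder of this helper copies to the *number and irq
	 * out-parameters before returning rc.
	 */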
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
	struct device *dev = &adapter->vdev->dev;

	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
		dev_err(dev, "Couldn't alloc long term buffer\n");
	ltb->map_id = adapter->map_id;
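
	/*
	 * Added note on the completion protocol used here and in the VPD
	 * and MAC paths below: the caller (re)initializes adapter->fw_done,
	 * sends a CRQ request (send_request_map() in this case) and blocks
	 * in wait_for_completion(); the CRQ response handler records the
	 * server's return code in adapter->fw_done_rc and completes
	 * fw_done, waking this thread.
	 */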
	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
	struct device *dev = &adapter->vdev->dev;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
		adapter->rx_pool[i].active = 0;
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
			cpu_to_be64((u64)&pool->rx_buff[index]);
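		/*
		 * Added note: the correlator round-trips the kernel address
		 * of this rx_buff through the VNIC server; on completion,
		 * ibmvnic_poll() casts rx_comp.correlator straight back to
		 * an ibmvnic_rx_buff pointer.
		 */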
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
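		/*
		 * Added worked example (values assumed): for a 4 KiB buffer,
		 * buff_size = 0x1000, so buff_size << 8 = 0x00100000 and
		 * cpu_to_be32() stores the bytes 00 10 00 00. The 24-bit
		 * field reads the first three bytes as length 0x001000
		 * (4096), and the shifted-in low byte falls outside the
		 * field instead of the length's top byte being cut off.
		 */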
		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
		if (lpar_rc != H_SUCCESS)
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
static void replenish_pools(struct ibmvnic_adapter *adapter)
	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
	if (!adapter->tx_stats_buffers)

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
	if (!adapter->rx_stats_buffers)

static void release_stats_token(struct ibmvnic_adapter *adapter)
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
	adapter->stats_token = 0;

static int init_stats_token(struct ibmvnic_adapter *adapter)
	struct device *dev = &adapter->vdev->dev;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rx_pool *rx_pool;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
static void release_rx_pools(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rx_pool *rx_pool;

	if (!adapter->rx_pool)

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;

		kfree(rx_pool->rx_buff);

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
static int init_rx_pools(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
static void release_vpd_data(struct ibmvnic_adapter *adapter)
	kfree(adapter->vpd->buff);

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);

static void release_tx_pools(struct ibmvnic_adapter *adapter)
	if (!adapter->tx_pool)

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
	if (!tx_pool->tx_buff)

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

static int init_tx_pools(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
			release_tx_pools(adapter);

		init_one_tx_pool(netdev, &adapter->tso_pool[i],
			release_tx_pools(adapter);
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
	if (adapter->napi_enabled)

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
	if (!adapter->napi_enabled)

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);

	adapter->napi_enabled = false;

static int init_napi(struct ibmvnic_adapter *adapter)
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);

	adapter->num_active_rx_napi = adapter->req_rx_queues;

static void release_napi(struct ibmvnic_adapter *adapter)
	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
static int ibmvnic_login(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);

		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
			netdev_warn(netdev, "Unable to login\n");

		if (!wait_for_completion_timeout(&adapter->init_done,
			netdev_warn(netdev, "Login timed out\n");

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			release_sub_crqs(adapter, 1);

				    "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
					    "Capabilities query timed out\n");

			rc = init_sub_crqs(adapter);
					    "SCRQ initialization failed\n");

			rc = init_sub_crq_irqs(adapter);
					    "SCRQ irq initialization failed\n");
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");

	__ibmvnic_set_mac(netdev, adapter->mac_addr);
static void release_login_buffer(struct ibmvnic_adapter *adapter)
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;

static void release_resources(struct ibmvnic_adapter *adapter)
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
			netdev_err(netdev, "Failed to set link state\n");

		if (!wait_for_completion_timeout(&adapter->init_done,
			netdev_err(netdev, "timeout setting link state\n");

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
static int set_real_num_queues(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
		netdev_err(netdev, "failed to set the number of tx queues\n");

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
		netdev_err(netdev, "failed to set the number of rx queues\n");
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;

	wait_for_completion(&adapter->fw_done);
static int init_resources(struct ibmvnic_adapter *adapter)
	struct net_device *netdev = adapter->netdev;

	rc = set_real_num_queues(netdev);

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");

	adapter->map_id = 1;

	rc = init_napi(adapter);

	send_map_query(adapter);

	rc = init_rx_pools(netdev);

	rc = init_tx_pools(netdev);
static int __ibmvnic_open(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);

	adapter->state = VNIC_OPEN;
static int ibmvnic_open(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);

		rc = init_resources(adapter);
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;

	if (!adapter->rx_pool)

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
	struct ibmvnic_tx_buff *tx_buff;

	if (!tx_pool || !tx_pool->tx_buff)

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
	if (!adapter->tx_pool || !adapter->tso_pool)

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
	struct net_device *netdev = adapter->netdev;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
static void ibmvnic_cleanup(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

static int __ibmvnic_close(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;

static int ibmvnic_close(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
	union sub_crq hdr_desc;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}

		memcpy(data, cur, tmp);

		*scrq_arr = hdr_desc;
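		/*
		 * Added worked example: the first descriptor carries up to
		 * 24 bytes of header data and each extension descriptor up
		 * to 29 more, so a 54-byte Ethernet+IPv4+TCP header
		 * (14 + 20 + 20) becomes one header descriptor (24 bytes)
		 * plus two extension descriptors (29 + 1 bytes).
		 */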
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
	int hdr_len[3] = {0, 0, 0};
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

	if (ibmvnic_xmit_workarounds(skb, netdev)) {

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
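
	/*
	 * Added note on the free_map convention: free_map is a ring of free
	 * buffer indices. The slot at consumer_index is claimed above and
	 * poisoned with IBMVNIC_INVALID_MAP; completion handling (or the
	 * rollback path at the end of this function) writes the index back,
	 * and a sender that reads IBMVNIC_INVALID_MAP here treats the pool
	 * as exhausted and drops the skb.
	 */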
	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);

	tx_pool->consumer_index =
		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;

	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");

		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,

		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],

	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);

	tx_bytes += skb->len;
	txq->trans_start = jiffies;

	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;

	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
static void ibmvnic_set_multi(struct net_device *netdev)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)

	if (netdev->flags & IFF_ALLMULTI) {
		/* Accept all multicast */
		memset(&crq, 0, sizeof(crq));
		crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
		crq.multicast_ctrl.cmd = MULTICAST_CTRL;
		crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
		ibmvnic_send_crq(adapter, &crq);
	} else if (netdev_mc_empty(netdev)) {
		/* Reject all multicast */
		memset(&crq, 0, sizeof(crq));
		crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
		crq.multicast_ctrl.cmd = MULTICAST_CTRL;
		crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
		ibmvnic_send_crq(adapter, &crq);

		/* Accept one or more multicast(s) */
		netdev_for_each_mc_addr(ha, netdev) {
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
			ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
			ibmvnic_send_crq(adapter, &crq);
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (adapter->fw_done_rc) {

	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
			rc = ibmvnic_reset_crq(adapter);
			rc = vio_enable_interrupts(adapter->vdev);

			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);

		rc = ibmvnic_reset_init(adapter);
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)

		rc = ibmvnic_login(netdev);
			adapter->state = reset_state;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   adapter->req_tx_entries_per_subcrq !=
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);

			rc = reset_tx_pools(adapter);

			rc = reset_rx_pools(adapter);

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)

	rc = __ibmvnic_open(netdev);
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

	netif_carrier_on(netdev);
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
	struct net_device *netdev = adapter->netdev;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);

	rc = ibmvnic_init(adapter);

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)

	rc = ibmvnic_login(netdev);
		adapter->state = VNIC_PROBED;

	rc = init_resources(adapter);

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)

	rc = __ibmvnic_open(netdev);
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

	netif_carrier_on(netdev);
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
		list_del(&rwi->list);

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);

static void free_all_rwi(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	rwi = get_next_rwi(adapter);
static void __ibmvnic_reset(struct work_struct *work)
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool we_lock_rtnl = false;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	/* netif_set_real_num_xx_queues needs to take rtnl lock here
	 * unless wait_for_reset is set, in which case the rtnl lock
	 * has already been taken before initializing the reset
	 */
	if (!adapter->wait_for_reset) {
		we_lock_rtnl = true;

	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
		if (adapter->force_reset_recovery) {
			adapter->force_reset_recovery = false;
			rc = do_hard_reset(adapter, rwi, reset_state);
		} else {
			rc = do_reset(adapter, rwi, reset_state);

		if (rc && rc != IBMVNIC_INIT_FAILED &&
		    !adapter->force_reset_recovery)

		rwi = get_next_rwi(adapter);

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);

		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);

	adapter->resetting = false;
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);

	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	adapter->resetting = true;
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
static void ibmvnic_tx_timeout(struct net_device *dev)
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
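
/*
 * Added design note: the rx free_map acts as a FIFO of free slot
 * indices with two cursors. replenish_rx_pool() consumes indices at
 * next_free when posting buffers to firmware, remove_buff_from_pool()
 * returns them at next_alloc, and the 'available' atomic tracks the
 * pool depth between the two.
 */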
static int ibmvnic_poll(struct napi_struct *napi, int budget)
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;

		if (unlikely(adapter->resetting &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
			(struct ibmvnic_rx_buff *)be64_to_cpu(next->
							      rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;

		skb_copy_to_linear_data(skb, rx_buff->data + offset,

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
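
	/*
	 * Added note on the re-arm sequence below: the queue interrupt is
	 * re-enabled before napi_complete_done(), so a frame that arrived
	 * during that window is caught by re-checking pending_scrq(); if
	 * napi_reschedule() succeeds, the irq is disabled again for
	 * another polling pass.
	 */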
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);

	return frames_processed;
static int wait_for_reset(struct ibmvnic_adapter *adapter)
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	init_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	wait_for_completion(&adapter->reset_done);

	if (adapter->reset_done_rc) {
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		init_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		wait_for_completion(&adapter->reset_done);

	adapter->wait_for_reset = false;
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
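
	/*
	 * Added example: a GSO skb with gso_size 200, or one with
	 * gso_segs == 1, has all NETIF_F_GSO_MASK bits cleared here, so
	 * the stack falls back to the software GSO path for that packet
	 * instead of handing it to the backing device.
	 */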
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}
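/* Illustrative user-space trigger for the path above (sizes hypothetical):
 *
 *	# ethtool -G eth0 rx 2048 tx 2048
 *
 * This lands in ibmvnic_set_ringparam(), which records the desired sizes and
 * drives a CHANGE_PARAM reset; if the server grants less, the "Could not
 * match full ringsize request" message reports what was actually allowed.
 */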
static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		channels->max_rx = adapter->max_rx_queues;
		channels->max_tx = adapter->max_tx_queues;
	} else {
		channels->max_rx = IBMVNIC_MAX_QUEUES;
		channels->max_tx = IBMVNIC_MAX_QUEUES;
	}
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}
static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_queues != channels->rx_count ||
	     adapter->req_tx_queues != channels->tx_count))
		netdev_info(netdev,
			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
				i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	}
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}
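/* Note: the order values are written into data[] above must match the string
 * order laid down in ibmvnic_get_strings(); ethtool pairs names and values
 * purely by index, e.g. (output illustrative):
 *
 *	# ethtool -S eth0
 *	     tx0_packets: 1024
 *	     tx0_bytes: 1514000
 *	     ...
 */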
static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};
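/* Illustrative user-space entry points into the ops table above:
 *
 *	# ethtool -L eth0 rx 4 tx 4		-> ibmvnic_set_channels()
 *	# ethtool --set-priv-flags eth0 \
 *	      use-server-maxes on		-> ibmvnic_set_priv_flags()
 *
 * The priv-flag name is whatever the ibmvnic_priv_flags[] string table
 * advertises; "use-server-maxes" is shown here as an assumption.
 */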
/* Routines for managing CRQs/sCRQs */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try once more */
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
2699 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2703 if (adapter->tx_scrq) {
2704 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2705 if (!adapter->tx_scrq[i])
2708 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2710 if (adapter->tx_scrq[i]->irq) {
2711 free_irq(adapter->tx_scrq[i]->irq,
2712 adapter->tx_scrq[i]);
2713 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2714 adapter->tx_scrq[i]->irq = 0;
2717 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2721 kfree(adapter->tx_scrq);
2722 adapter->tx_scrq = NULL;
2723 adapter->num_active_tx_scrqs = 0;
2726 if (adapter->rx_scrq) {
2727 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2728 if (!adapter->rx_scrq[i])
2731 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2733 if (adapter->rx_scrq[i]->irq) {
2734 free_irq(adapter->rx_scrq[i]->irq,
2735 adapter->rx_scrq[i]);
2736 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2737 adapter->rx_scrq[i]->irq = 0;
2740 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2744 kfree(adapter->rx_scrq);
2745 adapter->rx_scrq = NULL;
2746 adapter->num_active_rx_scrqs = 0;
2750 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2751 struct ibmvnic_sub_crq_queue *scrq)
2753 struct device *dev = &adapter->vdev->dev;
2756 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2757 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2759 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2764 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2765 struct ibmvnic_sub_crq_queue *scrq)
2767 struct device *dev = &adapter->vdev->dev;
2770 if (scrq->hw_irq > 0x100000000ULL) {
2771 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2775 if (adapter->resetting &&
2776 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2777 u64 val = (0xff000000) | scrq->hw_irq;
2779 rc = plpar_hcall_norets(H_EOI, val);
2781 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2785 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2786 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2788 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2793 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2794 struct ibmvnic_sub_crq_queue *scrq)
2796 struct device *dev = &adapter->vdev->dev;
2797 struct ibmvnic_tx_pool *tx_pool;
2798 struct ibmvnic_tx_buff *txbuff;
2799 union sub_crq *next;
2805 while (pending_scrq(adapter, scrq)) {
2806 unsigned int pool = scrq->pool_index;
2807 int num_entries = 0;
2809 next = ibmvnic_next_scrq(adapter, scrq);
2810 for (i = 0; i < next->tx_comp.num_comps; i++) {
2811 if (next->tx_comp.rcs[i]) {
2812 dev_err(dev, "tx error %x\n",
2813 next->tx_comp.rcs[i]);
2816 index = be32_to_cpu(next->tx_comp.correlators[i]);
2817 if (index & IBMVNIC_TSO_POOL_MASK) {
2818 tx_pool = &adapter->tso_pool[pool];
2819 index &= ~IBMVNIC_TSO_POOL_MASK;
2821 tx_pool = &adapter->tx_pool[pool];
2824 txbuff = &tx_pool->tx_buff[index];
2826 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2827 if (!txbuff->data_dma[j])
2830 txbuff->data_dma[j] = 0;
2832 /* if sub_crq was sent indirectly */
2833 first = &txbuff->indir_arr[0].generic.first;
2834 if (*first == IBMVNIC_CRQ_CMD) {
2835 dma_unmap_single(dev, txbuff->indir_dma,
2836 sizeof(txbuff->indir_arr),
2841 if (txbuff->last_frag) {
2842 dev_kfree_skb_any(txbuff->skb);
2846 num_entries += txbuff->num_entries;
2848 tx_pool->free_map[tx_pool->producer_index] = index;
2849 tx_pool->producer_index =
2850 (tx_pool->producer_index + 1) %
2851 tx_pool->num_buffers;
		/* remove tx_comp scrq */
2854 next->tx_comp.first = 0;
2856 if (atomic_sub_return(num_entries, &scrq->used) <=
2857 (adapter->req_tx_entries_per_subcrq / 2) &&
2858 __netif_subqueue_stopped(adapter->netdev,
2859 scrq->pool_index)) {
2860 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2861 netdev_dbg(adapter->netdev, "Started queue %d\n",
2866 enable_scrq_irq(adapter, scrq);
2868 if (pending_scrq(adapter, scrq)) {
2869 disable_scrq_irq(adapter, scrq);
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
2908 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2910 struct device *dev = &adapter->vdev->dev;
2911 struct ibmvnic_sub_crq_queue *scrq;
2915 for (i = 0; i < adapter->req_tx_queues; i++) {
2916 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2918 scrq = adapter->tx_scrq[i];
2919 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2923 dev_err(dev, "Error mapping irq\n");
2924 goto req_tx_irq_failed;
2927 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
2928 adapter->vdev->unit_address, i);
2929 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2930 0, scrq->name, scrq);
2933 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2935 irq_dispose_mapping(scrq->irq);
2936 goto req_tx_irq_failed;
2940 for (i = 0; i < adapter->req_rx_queues; i++) {
2941 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2943 scrq = adapter->rx_scrq[i];
2944 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2947 dev_err(dev, "Error mapping irq\n");
2948 goto req_rx_irq_failed;
2950 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
2951 adapter->vdev->unit_address, i);
2952 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2953 0, scrq->name, scrq);
2955 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2957 irq_dispose_mapping(scrq->irq);
2958 goto req_rx_irq_failed;
	return 0;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
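/* The error path above is the usual partial-initialization unwind: a failure
 * at index i frees only entries [0, i) of the current stage, then falls
 * through to the previous stage's label.  Minimal sketch with hypothetical
 * helpers:
 *
 *	for (i = 0; i < n; i++)
 *		if (setup(i))
 *			goto undo;
 *	return 0;
 *  undo:
 *	while (i--)
 *		teardown(i);
 *	return rc;
 */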
2978 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2980 struct device *dev = &adapter->vdev->dev;
2981 struct ibmvnic_sub_crq_queue **allqueues;
2982 int registered_queues = 0;
2987 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2989 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2993 for (i = 0; i < total_queues; i++) {
2994 allqueues[i] = init_sub_crq_queue(adapter);
2995 if (!allqueues[i]) {
2996 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2999 registered_queues++;
3002 /* Make sure we were able to register the minimum number of queues */
3003 if (registered_queues <
3004 adapter->min_tx_queues + adapter->min_rx_queues) {
3005 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
	/* Distribute the shortfall from failed allocations across the
	 * requested rx and tx queue counts
	 */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
3011 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3014 if (adapter->req_rx_queues > adapter->min_rx_queues)
3015 adapter->req_rx_queues--;
3020 if (adapter->req_tx_queues > adapter->min_tx_queues)
3021 adapter->req_tx_queues--;
3028 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3029 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3030 if (!adapter->tx_scrq)
3033 for (i = 0; i < adapter->req_tx_queues; i++) {
3034 adapter->tx_scrq[i] = allqueues[i];
3035 adapter->tx_scrq[i]->pool_index = i;
3036 adapter->num_active_tx_scrqs++;
3039 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3040 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3041 if (!adapter->rx_scrq)
3044 for (i = 0; i < adapter->req_rx_queues; i++) {
3045 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3046 adapter->rx_scrq[i]->scrq_num = i;
3047 adapter->num_active_rx_scrqs++;
3054 kfree(adapter->tx_scrq);
3055 adapter->tx_scrq = NULL;
3057 for (i = 0; i < registered_queues; i++)
3058 release_sub_crq_queue(adapter, allqueues[i], 1);
3063 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3065 struct device *dev = &adapter->vdev->dev;
3066 union ibmvnic_crq crq;
	/* Sub-CRQ entries are 32 bytes long */
3071 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3073 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3074 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3075 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3079 if (adapter->desired.mtu)
3080 adapter->req_mtu = adapter->desired.mtu;
3082 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3084 if (!adapter->desired.tx_entries)
3085 adapter->desired.tx_entries =
3086 adapter->max_tx_entries_per_subcrq;
3087 if (!adapter->desired.rx_entries)
3088 adapter->desired.rx_entries =
3089 adapter->max_rx_add_entries_per_subcrq;
3091 max_entries = IBMVNIC_MAX_LTB_SIZE /
3092 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3094 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3095 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3096 adapter->desired.tx_entries = max_entries;
3099 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3100 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3101 adapter->desired.rx_entries = max_entries;
3104 if (adapter->desired.tx_entries)
3105 adapter->req_tx_entries_per_subcrq =
3106 adapter->desired.tx_entries;
3108 adapter->req_tx_entries_per_subcrq =
3109 adapter->max_tx_entries_per_subcrq;
3111 if (adapter->desired.rx_entries)
3112 adapter->req_rx_add_entries_per_subcrq =
3113 adapter->desired.rx_entries;
3115 adapter->req_rx_add_entries_per_subcrq =
3116 adapter->max_rx_add_entries_per_subcrq;
3118 if (adapter->desired.tx_queues)
3119 adapter->req_tx_queues =
3120 adapter->desired.tx_queues;
3122 adapter->req_tx_queues =
3123 adapter->opt_tx_comp_sub_queues;
3125 if (adapter->desired.rx_queues)
3126 adapter->req_rx_queues =
3127 adapter->desired.rx_queues;
3129 adapter->req_rx_queues =
3130 adapter->opt_rx_comp_queues;
3132 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3135 memset(&crq, 0, sizeof(crq));
3136 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3137 crq.request_capability.cmd = REQUEST_CAPABILITY;
3139 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3140 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3141 atomic_inc(&adapter->running_cap_crqs);
3142 ibmvnic_send_crq(adapter, &crq);
3144 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3145 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3146 atomic_inc(&adapter->running_cap_crqs);
3147 ibmvnic_send_crq(adapter, &crq);
3149 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3150 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3151 atomic_inc(&adapter->running_cap_crqs);
3152 ibmvnic_send_crq(adapter, &crq);
3154 crq.request_capability.capability =
3155 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3156 crq.request_capability.number =
3157 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3158 atomic_inc(&adapter->running_cap_crqs);
3159 ibmvnic_send_crq(adapter, &crq);
3161 crq.request_capability.capability =
3162 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3163 crq.request_capability.number =
3164 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3165 atomic_inc(&adapter->running_cap_crqs);
3166 ibmvnic_send_crq(adapter, &crq);
3168 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3169 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3170 atomic_inc(&adapter->running_cap_crqs);
3171 ibmvnic_send_crq(adapter, &crq);
	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
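/* Illustrative capability round trip (message contents hypothetical): the
 * server may grant less than requested via PARTIALSUCCESS, in which case
 * handle_request_cap_rsp() stores the granted value and resends:
 *
 *	driver:  REQUEST_CAPABILITY      REQ_TX_QUEUES = 8
 *	server:  REQUEST_CAPABILITY_RSP  rc = PARTIALSUCCESS, number = 4
 *	driver:  REQUEST_CAPABILITY      REQ_TX_QUEUES = 4
 *	server:  REQUEST_CAPABILITY_RSP  rc = SUCCESS
 */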
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
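/* Both cursors above implement the same valid-bit ring convention: an entry
 * is consumable while IBMVNIC_CRQ_CMD_RSP is set in its first byte, and the
 * consumer advances (wrapping at size) only past valid entries.  Sketch of
 * one consumption step (illustrative):
 *
 *	entry = &ring[cur];
 *	if (!(entry->first & IBMVNIC_CRQ_CMD_RSP))
 *		return NULL;			// nothing new from producer
 *	if (++cur == size)
 *		cur = 0;			// wrap
 */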
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (adapter->resetting)
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}
static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
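/* Illustrative layout of the client data written above, assuming a node name
 * of "lpar1" and a device name of "eth0" (lengths include the NUL):
 *
 *	type=1 len=6 name="Linux\0"	(LPAR OS)
 *	type=2 len=6 name="lpar1\0"	(LPAR name)
 *	type=3 len=5 name="eth0\0"	(device name)
 *
 * vnic_client_data_len() also reserves a fourth, empty entry as a terminator.
 */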
3417 static int send_login(struct ibmvnic_adapter *adapter)
3419 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3420 struct ibmvnic_login_buffer *login_buffer;
3421 struct device *dev = &adapter->vdev->dev;
3422 dma_addr_t rsp_buffer_token;
3423 dma_addr_t buffer_token;
3424 size_t rsp_buffer_size;
3425 union ibmvnic_crq crq;
3429 int client_data_len;
3430 struct vnic_login_client_data *vlcd;
3433 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3434 netdev_err(adapter->netdev,
3435 "RX or TX queues are not allocated, device login failed\n");
3439 release_login_rsp_buffer(adapter);
3440 client_data_len = vnic_client_data_len(adapter);
3443 sizeof(struct ibmvnic_login_buffer) +
3444 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3447 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3449 goto buf_alloc_failed;
3451 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3453 if (dma_mapping_error(dev, buffer_token)) {
3454 dev_err(dev, "Couldn't map login buffer\n");
3455 goto buf_map_failed;
3458 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3459 sizeof(u64) * adapter->req_tx_queues +
3460 sizeof(u64) * adapter->req_rx_queues +
3461 sizeof(u64) * adapter->req_rx_queues +
3462 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3464 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3465 if (!login_rsp_buffer)
3466 goto buf_rsp_alloc_failed;
3468 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3469 rsp_buffer_size, DMA_FROM_DEVICE);
3470 if (dma_mapping_error(dev, rsp_buffer_token)) {
3471 dev_err(dev, "Couldn't map login rsp buffer\n");
3472 goto buf_rsp_map_failed;
3475 adapter->login_buf = login_buffer;
3476 adapter->login_buf_token = buffer_token;
3477 adapter->login_buf_sz = buffer_size;
3478 adapter->login_rsp_buf = login_rsp_buffer;
3479 adapter->login_rsp_buf_token = rsp_buffer_token;
3480 adapter->login_rsp_buf_sz = rsp_buffer_size;
3482 login_buffer->len = cpu_to_be32(buffer_size);
3483 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3484 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3485 login_buffer->off_txcomp_subcrqs =
3486 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3487 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3488 login_buffer->off_rxcomp_subcrqs =
3489 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3490 sizeof(u64) * adapter->req_tx_queues);
3491 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3492 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3494 tx_list_p = (__be64 *)((char *)login_buffer +
3495 sizeof(struct ibmvnic_login_buffer));
3496 rx_list_p = (__be64 *)((char *)login_buffer +
3497 sizeof(struct ibmvnic_login_buffer) +
3498 sizeof(u64) * adapter->req_tx_queues);
3500 for (i = 0; i < adapter->req_tx_queues; i++) {
3501 if (adapter->tx_scrq[i]) {
3502 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3507 for (i = 0; i < adapter->req_rx_queues; i++) {
3508 if (adapter->rx_scrq[i]) {
3509 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3514 /* Insert vNIC login client data */
3515 vlcd = (struct vnic_login_client_data *)
3516 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3517 login_buffer->client_data_offset =
3518 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3519 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3521 vnic_add_client_data(adapter, vlcd);
3523 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3524 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3525 netdev_dbg(adapter->netdev, "%016lx\n",
3526 ((unsigned long int *)(adapter->login_buf))[i]);
3529 memset(&crq, 0, sizeof(crq));
3530 crq.login.first = IBMVNIC_CRQ_CMD;
3531 crq.login.cmd = LOGIN;
3532 crq.login.ioba = cpu_to_be32(buffer_token);
3533 crq.login.len = cpu_to_be32(buffer_size);
3534 ibmvnic_send_crq(adapter, &crq);
3539 kfree(login_rsp_buffer);
3540 buf_rsp_alloc_failed:
3541 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3543 kfree(login_buffer);
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;

	return ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
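/* Sketch of the long-term-buffer handshake these helpers serve (illustrative;
 * the "ltb" fields are assumptions): map_id ties a REQUEST_MAP to the later
 * REQUEST_UNMAP for the same DMA region.
 *
 *	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 *	// ... REQUEST_MAP_RSP arrives; the buffer is then reused for skbs ...
 *	send_request_unmap(adapter, ltb->map_id);
 */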
3583 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3584 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3586 union ibmvnic_crq crq;
3588 atomic_set(&adapter->running_cap_crqs, 0);
3589 memset(&crq, 0, sizeof(crq));
3590 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3591 crq.query_capability.cmd = QUERY_CAPABILITY;
3593 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3594 atomic_inc(&adapter->running_cap_crqs);
3595 ibmvnic_send_crq(adapter, &crq);
3597 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3598 atomic_inc(&adapter->running_cap_crqs);
3599 ibmvnic_send_crq(adapter, &crq);
3601 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3602 atomic_inc(&adapter->running_cap_crqs);
3603 ibmvnic_send_crq(adapter, &crq);
3605 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3606 atomic_inc(&adapter->running_cap_crqs);
3607 ibmvnic_send_crq(adapter, &crq);
3609 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3610 atomic_inc(&adapter->running_cap_crqs);
3611 ibmvnic_send_crq(adapter, &crq);
3613 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3614 atomic_inc(&adapter->running_cap_crqs);
3615 ibmvnic_send_crq(adapter, &crq);
3617 crq.query_capability.capability =
3618 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3619 atomic_inc(&adapter->running_cap_crqs);
3620 ibmvnic_send_crq(adapter, &crq);
3622 crq.query_capability.capability =
3623 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3624 atomic_inc(&adapter->running_cap_crqs);
3625 ibmvnic_send_crq(adapter, &crq);
3627 crq.query_capability.capability =
3628 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3629 atomic_inc(&adapter->running_cap_crqs);
3630 ibmvnic_send_crq(adapter, &crq);
3632 crq.query_capability.capability =
3633 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3634 atomic_inc(&adapter->running_cap_crqs);
3635 ibmvnic_send_crq(adapter, &crq);
3637 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3638 atomic_inc(&adapter->running_cap_crqs);
3639 ibmvnic_send_crq(adapter, &crq);
3641 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3642 atomic_inc(&adapter->running_cap_crqs);
3643 ibmvnic_send_crq(adapter, &crq);
3645 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3646 atomic_inc(&adapter->running_cap_crqs);
3647 ibmvnic_send_crq(adapter, &crq);
3649 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3650 atomic_inc(&adapter->running_cap_crqs);
3651 ibmvnic_send_crq(adapter, &crq);
3653 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3654 atomic_inc(&adapter->running_cap_crqs);
3655 ibmvnic_send_crq(adapter, &crq);
3657 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3658 atomic_inc(&adapter->running_cap_crqs);
3659 ibmvnic_send_crq(adapter, &crq);
3661 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3662 atomic_inc(&adapter->running_cap_crqs);
3663 ibmvnic_send_crq(adapter, &crq);
3665 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3666 atomic_inc(&adapter->running_cap_crqs);
3667 ibmvnic_send_crq(adapter, &crq);
3669 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3670 atomic_inc(&adapter->running_cap_crqs);
3671 ibmvnic_send_crq(adapter, &crq);
3673 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3674 atomic_inc(&adapter->running_cap_crqs);
3675 ibmvnic_send_crq(adapter, &crq);
3677 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3678 atomic_inc(&adapter->running_cap_crqs);
3679 ibmvnic_send_crq(adapter, &crq);
3681 crq.query_capability.capability =
3682 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3683 atomic_inc(&adapter->running_cap_crqs);
3684 ibmvnic_send_crq(adapter, &crq);
3686 crq.query_capability.capability =
3687 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3688 atomic_inc(&adapter->running_cap_crqs);
3689 ibmvnic_send_crq(adapter, &crq);
3691 crq.query_capability.capability =
3692 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3693 atomic_inc(&adapter->running_cap_crqs);
3694 ibmvnic_send_crq(adapter, &crq);
	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
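/* The atomic_inc() per query above is paired with an atomic_dec() in
 * handle_query_cap_rsp(); once running_cap_crqs drains to zero every answer
 * is in and the driver moves on to the request phase:
 *
 *	if (atomic_read(&adapter->running_cap_crqs) == 0)
 *		ibmvnic_send_req_caps(adapter, 0);
 */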
3701 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3702 struct ibmvnic_adapter *adapter)
3704 struct device *dev = &adapter->vdev->dev;
3706 if (crq->get_vpd_size_rsp.rc.code) {
3707 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3708 crq->get_vpd_size_rsp.rc.code);
3709 complete(&adapter->fw_done);
3713 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3714 complete(&adapter->fw_done);
3717 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3718 struct ibmvnic_adapter *adapter)
3720 struct device *dev = &adapter->vdev->dev;
3721 unsigned char *substr = NULL;
3722 u8 fw_level_len = 0;
3724 memset(adapter->fw_version, 0, 32);
3726 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3729 if (crq->get_vpd_rsp.rc.code) {
3730 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3731 crq->get_vpd_rsp.rc.code);
3735 /* get the position of the firmware version info
3736 * located after the ASCII 'RM' substring in the buffer
3738 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3740 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3744 /* get length of firmware level ASCII substring */
3745 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3746 fw_level_len = *(substr + 2);
3748 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
3752 /* copy firmware version string from vpd into adapter */
3753 if ((substr + 3 + fw_level_len) <
3754 (adapter->vpd->buff + adapter->vpd->len)) {
3755 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3757 dev_info(dev, "FW substr extrapolated VPD buff\n");
complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
3766 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3768 struct device *dev = &adapter->vdev->dev;
3769 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3770 netdev_features_t old_hw_features = 0;
3771 union ibmvnic_crq crq;
3774 dma_unmap_single(dev, adapter->ip_offload_tok,
3775 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3777 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3778 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3779 netdev_dbg(adapter->netdev, "%016lx\n",
3780 ((unsigned long int *)(buf))[i]);
3782 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3783 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3784 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3785 buf->tcp_ipv4_chksum);
3786 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3787 buf->tcp_ipv6_chksum);
3788 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3789 buf->udp_ipv4_chksum);
3790 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3791 buf->udp_ipv6_chksum);
3792 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3793 buf->large_tx_ipv4);
3794 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3795 buf->large_tx_ipv6);
3796 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3797 buf->large_rx_ipv4);
3798 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3799 buf->large_rx_ipv6);
3800 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3801 buf->max_ipv4_header_size);
3802 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3803 buf->max_ipv6_header_size);
3804 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3805 buf->max_tcp_header_size);
3806 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3807 buf->max_udp_header_size);
3808 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3809 buf->max_large_tx_size);
3810 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3811 buf->max_large_rx_size);
3812 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3813 buf->ipv6_extension_header);
3814 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3815 buf->tcp_pseudosum_req);
3816 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3817 buf->num_ipv6_ext_headers);
3818 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3819 buf->off_ipv6_ext_headers);
3821 adapter->ip_offload_ctrl_tok =
3822 dma_map_single(dev, &adapter->ip_offload_ctrl,
3823 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3825 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3826 dev_err(dev, "Couldn't map ip offload control buffer\n");
3830 adapter->ip_offload_ctrl.len =
3831 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3832 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3833 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3834 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3835 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3836 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3837 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3838 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3839 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3840 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3842 /* large_rx disabled for now, additional features needed */
3843 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3844 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3846 if (adapter->state != VNIC_PROBING) {
3847 old_hw_features = adapter->netdev->hw_features;
3848 adapter->netdev->hw_features = 0;
3851 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3853 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3854 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3856 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3857 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3859 if ((adapter->netdev->features &
3860 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3861 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3863 if (buf->large_tx_ipv4)
3864 adapter->netdev->hw_features |= NETIF_F_TSO;
3865 if (buf->large_tx_ipv6)
3866 adapter->netdev->hw_features |= NETIF_F_TSO6;
3868 if (adapter->state == VNIC_PROBING) {
3869 adapter->netdev->features |= adapter->netdev->hw_features;
3870 } else if (old_hw_features != adapter->netdev->hw_features) {
3871 netdev_features_t tmp = 0;
3873 /* disable features no longer supported */
3874 adapter->netdev->features &= adapter->netdev->hw_features;
3875 /* turn on features now supported if previously enabled */
3876 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3877 adapter->netdev->hw_features;
3878 adapter->netdev->features |=
3879 tmp & adapter->netdev->wanted_features;
3882 memset(&crq, 0, sizeof(crq));
3883 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3884 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3885 crq.control_ip_offload.len =
3886 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3887 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3888 ibmvnic_send_crq(adapter, &crq);
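/* Example of the hw_features reconciliation above (values illustrative): if
 * a reset drops TSO support, "features &= hw_features" turns NETIF_F_TSO off;
 * if TSO later comes back, the (old ^ new) & new mask picks it up again and
 * wanted_features decides whether the user still wants it re-enabled, without
 * clobbering features the user had explicitly disabled.
 */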
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}
3952 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3953 struct ibmvnic_adapter *adapter)
3955 struct device *dev = &adapter->vdev->dev;
3959 atomic_dec(&adapter->running_cap_crqs);
3960 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3962 req_value = &adapter->req_tx_queues;
3966 req_value = &adapter->req_rx_queues;
3969 case REQ_RX_ADD_QUEUES:
3970 req_value = &adapter->req_rx_add_queues;
3973 case REQ_TX_ENTRIES_PER_SUBCRQ:
3974 req_value = &adapter->req_tx_entries_per_subcrq;
3975 name = "tx_entries_per_subcrq";
3977 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3978 req_value = &adapter->req_rx_add_entries_per_subcrq;
3979 name = "rx_add_entries_per_subcrq";
3982 req_value = &adapter->req_mtu;
3985 case PROMISC_REQUESTED:
3986 req_value = &adapter->promisc;
3990 dev_err(dev, "Got invalid cap request rsp %d\n",
3991 crq->request_capability.capability);
3995 switch (crq->request_capability_rsp.rc.code) {
3998 case PARTIALSUCCESS:
3999 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4001 (long int)be64_to_cpu(crq->request_capability_rsp.
4004 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4006 pr_err("mtu of %llu is not supported. Reverting.\n",
4008 *req_value = adapter->fallback.mtu;
4011 be64_to_cpu(crq->request_capability_rsp.number);
4014 ibmvnic_send_req_caps(adapter, 1);
4017 dev_err(dev, "Error %d in request cap rsp\n",
4018 crq->request_capability_rsp.rc.code);
4022 /* Done receiving requested capabilities, query IP offload support */
4023 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4024 union ibmvnic_crq newcrq;
4025 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4026 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4027 &adapter->ip_offload_buf;
4029 adapter->wait_capability = false;
4030 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4034 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4035 if (!firmware_has_feature(FW_FEATURE_CMO))
4036 dev_err(dev, "Couldn't map offload buffer\n");
4040 memset(&newcrq, 0, sizeof(newcrq));
4041 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4042 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4043 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4044 newcrq.query_ip_offload.ioba =
4045 cpu_to_be32(adapter->ip_offload_tok);
4047 ibmvnic_send_crq(adapter, &newcrq);
4051 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4052 struct ibmvnic_adapter *adapter)
4054 struct device *dev = &adapter->vdev->dev;
4055 struct net_device *netdev = adapter->netdev;
4056 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4057 struct ibmvnic_login_buffer *login = adapter->login_buf;
4060 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4062 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4063 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4065 /* If the number of queues requested can't be allocated by the
4066 * server, the login response will return with code 1. We will need
4067 * to resend the login buffer with fewer queues requested.
4069 if (login_rsp_crq->generic.rc.code) {
4070 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4071 complete(&adapter->init_done);
4075 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4077 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4078 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4079 netdev_dbg(adapter->netdev, "%016lx\n",
4080 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4084 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4085 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4086 adapter->req_rx_add_queues !=
4087 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4088 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4089 ibmvnic_remove(adapter->vdev);
4092 release_login_buffer(adapter);
4093 complete(&adapter->init_done);
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
4126 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4127 struct ibmvnic_adapter *adapter)
4129 struct net_device *netdev = adapter->netdev;
4130 struct device *dev = &adapter->vdev->dev;
4133 atomic_dec(&adapter->running_cap_crqs);
4134 netdev_dbg(netdev, "Outstanding queries: %d\n",
4135 atomic_read(&adapter->running_cap_crqs));
4136 rc = crq->query_capability.rc.code;
4138 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4142 switch (be16_to_cpu(crq->query_capability.capability)) {
4144 adapter->min_tx_queues =
4145 be64_to_cpu(crq->query_capability.number);
4146 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4147 adapter->min_tx_queues);
4150 adapter->min_rx_queues =
4151 be64_to_cpu(crq->query_capability.number);
4152 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4153 adapter->min_rx_queues);
4155 case MIN_RX_ADD_QUEUES:
4156 adapter->min_rx_add_queues =
4157 be64_to_cpu(crq->query_capability.number);
4158 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4159 adapter->min_rx_add_queues);
4162 adapter->max_tx_queues =
4163 be64_to_cpu(crq->query_capability.number);
4164 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4165 adapter->max_tx_queues);
4168 adapter->max_rx_queues =
4169 be64_to_cpu(crq->query_capability.number);
4170 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4171 adapter->max_rx_queues);
4173 case MAX_RX_ADD_QUEUES:
4174 adapter->max_rx_add_queues =
4175 be64_to_cpu(crq->query_capability.number);
4176 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4177 adapter->max_rx_add_queues);
4179 case MIN_TX_ENTRIES_PER_SUBCRQ:
4180 adapter->min_tx_entries_per_subcrq =
4181 be64_to_cpu(crq->query_capability.number);
4182 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4183 adapter->min_tx_entries_per_subcrq);
4185 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4186 adapter->min_rx_add_entries_per_subcrq =
4187 be64_to_cpu(crq->query_capability.number);
4188 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4189 adapter->min_rx_add_entries_per_subcrq);
4191 case MAX_TX_ENTRIES_PER_SUBCRQ:
4192 adapter->max_tx_entries_per_subcrq =
4193 be64_to_cpu(crq->query_capability.number);
4194 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4195 adapter->max_tx_entries_per_subcrq);
4197 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4198 adapter->max_rx_add_entries_per_subcrq =
4199 be64_to_cpu(crq->query_capability.number);
4200 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4201 adapter->max_rx_add_entries_per_subcrq);
4203 case TCP_IP_OFFLOAD:
4204 adapter->tcp_ip_offload =
4205 be64_to_cpu(crq->query_capability.number);
4206 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4207 adapter->tcp_ip_offload);
4209 case PROMISC_SUPPORTED:
4210 adapter->promisc_supported =
4211 be64_to_cpu(crq->query_capability.number);
4212 netdev_dbg(netdev, "promisc_supported = %lld\n",
4213 adapter->promisc_supported);
4216 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4217 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4218 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4221 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4222 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4223 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4225 case MAX_MULTICAST_FILTERS:
4226 adapter->max_multicast_filters =
4227 be64_to_cpu(crq->query_capability.number);
4228 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4229 adapter->max_multicast_filters);
4231 case VLAN_HEADER_INSERTION:
4232 adapter->vlan_header_insertion =
4233 be64_to_cpu(crq->query_capability.number);
4234 if (adapter->vlan_header_insertion)
4235 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4236 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4237 adapter->vlan_header_insertion);
4239 case RX_VLAN_HEADER_INSERTION:
4240 adapter->rx_vlan_header_insertion =
4241 be64_to_cpu(crq->query_capability.number);
4242 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4243 adapter->rx_vlan_header_insertion);
4245 case MAX_TX_SG_ENTRIES:
4246 adapter->max_tx_sg_entries =
4247 be64_to_cpu(crq->query_capability.number);
4248 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4249 adapter->max_tx_sg_entries);
4251 case RX_SG_SUPPORTED:
4252 adapter->rx_sg_supported =
4253 be64_to_cpu(crq->query_capability.number);
4254 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4255 adapter->rx_sg_supported);
4257 case OPT_TX_COMP_SUB_QUEUES:
4258 adapter->opt_tx_comp_sub_queues =
4259 be64_to_cpu(crq->query_capability.number);
4260 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4261 adapter->opt_tx_comp_sub_queues);
4263 case OPT_RX_COMP_QUEUES:
4264 adapter->opt_rx_comp_queues =
4265 be64_to_cpu(crq->query_capability.number);
4266 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4267 adapter->opt_rx_comp_queues);
4269 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4270 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4271 be64_to_cpu(crq->query_capability.number);
4272 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4273 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4275 case OPT_TX_ENTRIES_PER_SUBCRQ:
4276 adapter->opt_tx_entries_per_subcrq =
4277 be64_to_cpu(crq->query_capability.number);
4278 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4279 adapter->opt_tx_entries_per_subcrq);
4281 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4282 adapter->opt_rxba_entries_per_subcrq =
4283 be64_to_cpu(crq->query_capability.number);
4284 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4285 adapter->opt_rxba_entries_per_subcrq);
4287 case TX_RX_DESC_REQ:
4288 adapter->tx_rx_desc_req = crq->query_capability.number;
4289 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4290 adapter->tx_rx_desc_req);
4294 netdev_err(netdev, "Got invalid cap rsp %d\n",
4295 crq->query_capability.capability);
4299 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4300 adapter->wait_capability = false;
4301 ibmvnic_send_req_caps(adapter, 0);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);
	return adapter->fw_done_rc ? -EIO : 0;
}
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	/* The speed field arrives big endian; convert it to host order
	 * before comparing against the IBMVNIC_*BPS constants.
	 */
	switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBP:	/* sic: defined without the trailing 'S' in ibmvnic.h */
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	default:
		netdev_warn(netdev, "Unknown speed 0x%08x\n",
			    be32_to_cpu(crq->query_phys_parms_rsp.speed));
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
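
/* ibmvnic_handle_crq() dispatches in two stages: the outer switch keys on
 * the message type in gen_crq->first (init handshakes, transport events
 * such as partition migration or device failover, or a command response),
 * and only IBMVNIC_CRQ_CMD_RSP messages fall through to the inner switch
 * on gen_crq->cmd that routes each response to its handler.
 */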
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
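
/* CRQ servicing is split between a minimal hard-irq handler, which only
 * schedules the tasklet, and the tasklet itself, which drains queued CRQ
 * messages and dispatches them with the queue lock held.
 */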
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
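
/* The hcalls below follow the usual PAPR convention of retrying while the
 * hypervisor reports H_BUSY or one of the long-busy hints; roughly:
 *
 *	do {
 *		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
 *	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 *
 * A retry loop could also sleep for the hinted interval in the long-busy
 * case instead of spinning; these helpers keep the simple form.
 */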
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
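
/* CRQ bring-up, in order: allocate one zeroed page for the message ring,
 * DMA-map it, register it with the hypervisor via H_REG_CRQ, then wire up
 * the tasklet and the VIO interrupt. Failure paths unwind in reverse via
 * the labels at the bottom of init_crq_queue().
 */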
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
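
/* ibmvnic_reset_init() differs from first-time init in two ways: it
 * reinitializes the already-created init_done completion, and, when the
 * requested queue counts changed across the reset, it releases and
 * rebuilds the sub-CRQs instead of merely resetting them.
 */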
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
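
/* The probe path below runs in order: fetch the MAC address from the
 * device tree, allocate the multi-queue netdev, set up reset bookkeeping,
 * bring up the CRQ, run the init handshake (retrying on EAGAIN), allocate
 * statistics buffers, then create the failover sysfs attribute and
 * register the netdev.
 */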
static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	adapter->resetting = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
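
/* Writing "1" to the failover sysfs attribute initiates a client-side
 * failover: the handler obtains a session token via
 * H_VIOCTL(H_GET_SESSION_TOKEN) and then reports a session error with
 * H_SESSION_ERR_DETECTED. Illustrative usage from userspace (the exact
 * sysfs path depends on the device's unit address; 30000003 is made up):
 *
 *	echo 1 > /sys/devices/vio/30000003/failover
 */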
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
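
/* ibmvnic_get_desired_dma() estimates the IOMMU entitlement this device
 * wants: one page for the CRQ, an aligned statistics buffer, four pages
 * per sub-CRQ, plus every rx pool's buffers rounded up to the IOMMU page
 * size. Before probe has populated the netdev, it falls back to
 * IBMVNIC_IO_ENTITLEMENT_DEFAULT.
 */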
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
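
/* The VIO bus matches this table against device-tree nodes whose type is
 * "network" and whose compatible property is "IBM,vnic";
 * MODULE_DEVICE_TABLE exports the same pairs so userspace can autoload
 * the module when such a device appears.
 */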
static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume,
};
static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};
/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);