1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
16 /* Messages are passed between the VNIC driver and the VNIC server using */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20 /* are used by the driver to notify the server that a packet is */
21 /* ready for transmission or that a buffer has been added to receive a */
22 /* packet. Subsequently, sCRQs are used by the server to notify the */
23 /* driver that a packet transmission has been completed or that a packet */
24 /* has been received and placed in a waiting buffer. */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit */
28 /* or receive has been completed, the VNIC driver is required to use */
29 /* "long term mapping". This means that large, contiguous DMA mapped */
30 /* buffers are allocated on driver initialization and these buffers are */
31 /* then continuously reused to pass skbs to and from the VNIC server. */
33 /**************************************************************************/
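
/*
 * Illustration of the "long term mapping" layout described above: a
 * stand-alone sketch (plain C, guarded out so it is never built with
 * the driver) of how one large, once-mapped buffer is carved into
 * fixed-size slots that are reused for every packet instead of being
 * mapped and unmapped per skb. The slot math mirrors what the rx/tx
 * pool code below does with long_term_buff.buff + index * buff_size.
 * All names here are invented for the example.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct toy_ltb {
        uint8_t *buff;          /* one large allocation, mapped once */
        size_t buff_size;       /* fixed size of each slot */
        size_t nslots;
};

static int toy_ltb_init(struct toy_ltb *ltb, size_t nslots, size_t buff_size)
{
        ltb->buff = calloc(nslots, buff_size);
        ltb->buff_size = buff_size;
        ltb->nslots = nslots;
        return ltb->buff ? 0 : -1;
}

/* Return the slot for @index; the same memory is reused forever. */
static uint8_t *toy_ltb_slot(struct toy_ltb *ltb, size_t index)
{
        return ltb->buff + index * ltb->buff_size;
}
#endif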
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
62 #include <asm/vio.h>
63 #include <asm/iommu.h>
64 #include <linux/uaccess.h>
65 #include <asm/firmware.h>
66 #include <linux/workqueue.h>
67 #include <linux/if_vlan.h>
68 #include <linux/utsname.h>

70 #include "ibmvnic.h"
72 static const char ibmvnic_driver_name[] = "ibmvnic";
73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
75 MODULE_AUTHOR("Santiago Leon");
76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
81 static int ibmvnic_remove(struct vio_dev *);
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
88 union sub_crq *sub_crq);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int budget);
100 static void send_map_query(struct ibmvnic_adapter *adapter);
101 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
102 static int send_request_unmap(struct ibmvnic_adapter *, u8);
103 static int send_login(struct ibmvnic_adapter *adapter);
104 static void send_cap_queries(struct ibmvnic_adapter *adapter);
105 static int init_sub_crqs(struct ibmvnic_adapter *);
106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
107 static int ibmvnic_init(struct ibmvnic_adapter *);
108 static int ibmvnic_reset_init(struct ibmvnic_adapter *);
109 static void release_crq_queue(struct ibmvnic_adapter *);
110 static int __ibmvnic_set_mac(struct net_device *, u8 *);
111 static int init_crq_queue(struct ibmvnic_adapter *adapter);
112 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
114 struct ibmvnic_stat {
115 char name[ETH_GSTRING_LEN];
116 int offset;
117 };
119 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
120 offsetof(struct ibmvnic_statistics, stat))
121 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
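
/*
 * Sketch of the offset-based stat lookup that IBMVNIC_STAT_OFF and
 * IBMVNIC_GET_STAT implement: each table entry records where a u64
 * counter lives inside the adapter, and the getter adds that offset to
 * the adapter pointer. Stand-alone model, not driver code; the struct
 * and field names are invented for illustration.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_stats { uint64_t rx_packets; uint64_t tx_packets; };
struct toy_adapter { int unrelated; struct toy_stats stats; };

struct toy_stat_entry {
        const char *name;
        size_t off;
};

#define TOY_STAT_OFF(s) (offsetof(struct toy_adapter, stats) + \
                         offsetof(struct toy_stats, s))
#define TOY_GET_STAT(a, off) (*(uint64_t *)((uint8_t *)(a) + (off)))

static const struct toy_stat_entry toy_stats[] = {
        { "rx_packets", TOY_STAT_OFF(rx_packets) },
        { "tx_packets", TOY_STAT_OFF(tx_packets) },
};

int main(void)
{
        struct toy_adapter a = { .stats = { .rx_packets = 7 } };
        size_t i;

        for (i = 0; i < sizeof(toy_stats) / sizeof(toy_stats[0]); i++)
                printf("%s = %llu\n", toy_stats[i].name,
                       (unsigned long long)TOY_GET_STAT(&a, toy_stats[i].off));
        return 0;
}
#endif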
123 static const struct ibmvnic_stat ibmvnic_stats[] = {
124 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
125 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
126 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
127 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
128 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
129 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
130 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
131 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
132 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
133 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
134 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
135 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
136 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
137 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
138 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
139 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
140 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
141 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
142 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
143 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
144 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
145 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
148 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
149 unsigned long length, unsigned long *number,
150 unsigned long *irq)
152 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
155 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
162 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
163 struct ibmvnic_long_term_buff *ltb, int size)
165 struct device *dev = &adapter->vdev->dev;
169 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
173 dev_err(dev, "Couldn't alloc long term buffer\n");
176 ltb->map_id = adapter->map_id;
179 init_completion(&adapter->fw_done);
180 rc = send_request_map(adapter, ltb->addr,
181 ltb->size, ltb->map_id);
183 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
186 wait_for_completion(&adapter->fw_done);
188 if (adapter->fw_done_rc) {
189 dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
190 adapter->fw_done_rc);
191 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
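
/*
 * alloc_long_term_buff() above uses a fire-and-wait pattern for the
 * firmware map request: init_completion(), send the CRQ, block in
 * wait_for_completion(), then inspect the rc that the CRQ response
 * handler stored in adapter->fw_done_rc, freeing the buffer on error.
 * A schematic, single-threaded model of that ordering (invented names;
 * the real completion is signalled from interrupt context):
 */
#if 0
struct toy_fw_ctx {
        int done;                       /* struct completion stand-in */
        int rc;                         /* adapter->fw_done_rc stand-in */
};

/* Runs when the "firmware" answers; complete(&adapter->fw_done). */
static void toy_fw_response(struct toy_fw_ctx *ctx, int rc)
{
        ctx->rc = rc;
        ctx->done = 1;
}

static int toy_map_buffer(struct toy_fw_ctx *ctx, void *buff)
{
        (void)buff;
        ctx->done = 0;                  /* init_completion() */
        toy_fw_response(ctx, 0);        /* normally asynchronous */
        while (!ctx->done)              /* wait_for_completion() */
                ;
        if (ctx->rc)                    /* map failed: caller frees buff */
                return ctx->rc;
        return 0;
}
#endif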
197 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
198 struct ibmvnic_long_term_buff *ltb)
200 struct device *dev = &adapter->vdev->dev;
205 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
206 adapter->reset_reason != VNIC_RESET_MOBILITY)
207 send_request_unmap(adapter, ltb->map_id);
208 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
211 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
212 struct ibmvnic_long_term_buff *ltb)
216 memset(ltb->buff, 0, ltb->size);
218 init_completion(&adapter->fw_done);
219 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
222 wait_for_completion(&adapter->fw_done);
224 if (adapter->fw_done_rc) {
225 dev_info(&adapter->vdev->dev,
226 "Reset failed, attempting to free and reallocate buffer\n");
227 free_long_term_buff(adapter, ltb);
228 return alloc_long_term_buff(adapter, ltb, ltb->size);
233 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
237 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
238 i++)
239 adapter->rx_pool[i].active = 0;
242 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
243 struct ibmvnic_rx_pool *pool)
245 int count = pool->size - atomic_read(&pool->available);
246 struct device *dev = &adapter->vdev->dev;
247 int buffers_added = 0;
248 unsigned long lpar_rc;
249 union sub_crq sub_crq;
262 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
263 be32_to_cpu(adapter->login_rsp_buf->
264 off_rxadd_subcrqs));
266 for (i = 0; i < count; ++i) {
267 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
269 dev_err(dev, "Couldn't replenish rx buff\n");
270 adapter->replenish_no_mem++;
274 index = pool->free_map[pool->next_free];
276 if (pool->rx_buff[index].skb)
277 dev_err(dev, "Inconsistent free_map!\n");
279 /* Copy the skb to the long term mapped DMA buffer */
280 offset = index * pool->buff_size;
281 dst = pool->long_term_buff.buff + offset;
282 memset(dst, 0, pool->buff_size);
283 dma_addr = pool->long_term_buff.addr + offset;
284 pool->rx_buff[index].data = dst;
286 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
287 pool->rx_buff[index].dma = dma_addr;
288 pool->rx_buff[index].skb = skb;
289 pool->rx_buff[index].pool_index = pool->index;
290 pool->rx_buff[index].size = pool->buff_size;
292 memset(&sub_crq, 0, sizeof(sub_crq));
293 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
294 sub_crq.rx_add.correlator =
295 cpu_to_be64((u64)&pool->rx_buff[index]);
296 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
297 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
299 /* The length field of the sCRQ is defined to be 24 bits so the
300 * buffer size needs to be left shifted by a byte before it is
301 * converted to big endian to prevent the last byte from being
302 * lost.
303 */
304 #ifdef __LITTLE_ENDIAN__
305 shift = 8;
306 #endif
307 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
309 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
311 if (lpar_rc != H_SUCCESS)
315 adapter->replenish_add_buff_success++;
316 pool->next_free = (pool->next_free + 1) % pool->size;
318 atomic_add(buffers_added, &pool->available);
322 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
323 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
324 pool->free_map[pool->next_free] = index;
325 pool->rx_buff[index].skb = NULL;
327 dev_kfree_skb_any(skb);
328 adapter->replenish_add_buff_failure++;
329 atomic_add(buffers_added, &pool->available);
331 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
332 /* Disable buffer pool replenishment and report carrier off if
333 * queue is closed or pending failover.
334 * Firmware guarantees that a signal will be sent to the
335 * driver, triggering a reset.
337 deactivate_rx_pools(adapter);
338 netif_carrier_off(adapter->netdev);
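
/*
 * Worked example of the 24-bit length trick in replenish_rx_pool():
 * on a little-endian host the buffer size is shifted left 8 bits
 * before the 32-bit big-endian conversion, so the value occupies the
 * first three bytes on the wire and nothing is lost when only 24 bits
 * of the field are consumed. Stand-alone demo using htonl() in place
 * of cpu_to_be32():
 */
#if 0
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t size = 0x012345;               /* fits in 24 bits */
        uint32_t wire = htonl(size << 8);       /* cpu_to_be32(size << shift) */
        uint8_t *b = (uint8_t *)&wire;

        /* prints 01 23 45 00: the length sits in the leading 3 bytes */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
}
#endif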
342 static void replenish_pools(struct ibmvnic_adapter *adapter)
346 adapter->replenish_task_cycles++;
347 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
348 i++)
349 if (adapter->rx_pool[i].active)
350 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
354 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
356 kfree(adapter->tx_stats_buffers);
357 kfree(adapter->rx_stats_buffers);
358 adapter->tx_stats_buffers = NULL;
359 adapter->rx_stats_buffers = NULL;
362 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
364 adapter->tx_stats_buffers =
365 kcalloc(IBMVNIC_MAX_QUEUES,
366 sizeof(struct ibmvnic_tx_queue_stats),
368 if (!adapter->tx_stats_buffers)
371 adapter->rx_stats_buffers =
372 kcalloc(IBMVNIC_MAX_QUEUES,
373 sizeof(struct ibmvnic_rx_queue_stats),
375 if (!adapter->rx_stats_buffers)
381 static void release_stats_token(struct ibmvnic_adapter *adapter)
383 struct device *dev = &adapter->vdev->dev;
385 if (!adapter->stats_token)
388 dma_unmap_single(dev, adapter->stats_token,
389 sizeof(struct ibmvnic_statistics),
391 adapter->stats_token = 0;
394 static int init_stats_token(struct ibmvnic_adapter *adapter)
396 struct device *dev = &adapter->vdev->dev;
399 stok = dma_map_single(dev, &adapter->stats,
400 sizeof(struct ibmvnic_statistics),
402 if (dma_mapping_error(dev, stok)) {
403 dev_err(dev, "Couldn't map stats buffer\n");
407 adapter->stats_token = stok;
408 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
412 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
414 struct ibmvnic_rx_pool *rx_pool;
419 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
420 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
422 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
423 for (i = 0; i < rx_scrqs; i++) {
424 rx_pool = &adapter->rx_pool[i];
426 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
428 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
429 free_long_term_buff(adapter, &rx_pool->long_term_buff);
430 rx_pool->buff_size = be64_to_cpu(size_array[i]);
431 rc = alloc_long_term_buff(adapter,
432 &rx_pool->long_term_buff,
436 rc = reset_long_term_buff(adapter,
437 &rx_pool->long_term_buff);
443 for (j = 0; j < rx_pool->size; j++)
444 rx_pool->free_map[j] = j;
446 memset(rx_pool->rx_buff, 0,
447 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
449 atomic_set(&rx_pool->available, 0);
450 rx_pool->next_alloc = 0;
451 rx_pool->next_free = 0;
458 static void release_rx_pools(struct ibmvnic_adapter *adapter)
460 struct ibmvnic_rx_pool *rx_pool;
463 if (!adapter->rx_pool)
466 for (i = 0; i < adapter->num_active_rx_pools; i++) {
467 rx_pool = &adapter->rx_pool[i];
469 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
471 kfree(rx_pool->free_map);
472 free_long_term_buff(adapter, &rx_pool->long_term_buff);
474 if (!rx_pool->rx_buff)
477 for (j = 0; j < rx_pool->size; j++) {
478 if (rx_pool->rx_buff[j].skb) {
479 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
480 rx_pool->rx_buff[j].skb = NULL;
484 kfree(rx_pool->rx_buff);
487 kfree(adapter->rx_pool);
488 adapter->rx_pool = NULL;
489 adapter->num_active_rx_pools = 0;
492 static int init_rx_pools(struct net_device *netdev)
494 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
495 struct device *dev = &adapter->vdev->dev;
496 struct ibmvnic_rx_pool *rx_pool;
501 rxadd_subcrqs =
502 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
503 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
504 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
506 adapter->rx_pool = kcalloc(rxadd_subcrqs,
507 sizeof(struct ibmvnic_rx_pool),
509 if (!adapter->rx_pool) {
510 dev_err(dev, "Failed to allocate rx pools\n");
514 adapter->num_active_rx_pools = rxadd_subcrqs;
516 for (i = 0; i < rxadd_subcrqs; i++) {
517 rx_pool = &adapter->rx_pool[i];
519 netdev_dbg(adapter->netdev,
520 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
521 i, adapter->req_rx_add_entries_per_subcrq,
522 be64_to_cpu(size_array[i]));
524 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
526 rx_pool->buff_size = be64_to_cpu(size_array[i]);
529 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
531 if (!rx_pool->free_map) {
532 release_rx_pools(adapter);
536 rx_pool->rx_buff = kcalloc(rx_pool->size,
537 sizeof(struct ibmvnic_rx_buff),
539 if (!rx_pool->rx_buff) {
540 dev_err(dev, "Couldn't alloc rx buffers\n");
541 release_rx_pools(adapter);
545 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
546 rx_pool->size * rx_pool->buff_size)) {
547 release_rx_pools(adapter);
551 for (j = 0; j < rx_pool->size; ++j)
552 rx_pool->free_map[j] = j;
554 atomic_set(&rx_pool->available, 0);
555 rx_pool->next_alloc = 0;
556 rx_pool->next_free = 0;
562 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
563 struct ibmvnic_tx_pool *tx_pool)
567 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
571 memset(tx_pool->tx_buff, 0,
572 tx_pool->num_buffers *
573 sizeof(struct ibmvnic_tx_buff));
575 for (i = 0; i < tx_pool->num_buffers; i++)
576 tx_pool->free_map[i] = i;
578 tx_pool->consumer_index = 0;
579 tx_pool->producer_index = 0;
584 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
589 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
590 for (i = 0; i < tx_scrqs; i++) {
591 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
594 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
602 static void release_vpd_data(struct ibmvnic_adapter *adapter)
607 kfree(adapter->vpd->buff);
613 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
614 struct ibmvnic_tx_pool *tx_pool)
616 kfree(tx_pool->tx_buff);
617 kfree(tx_pool->free_map);
618 free_long_term_buff(adapter, &tx_pool->long_term_buff);
621 static void release_tx_pools(struct ibmvnic_adapter *adapter)
625 if (!adapter->tx_pool)
628 for (i = 0; i < adapter->num_active_tx_pools; i++) {
629 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
630 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
633 kfree(adapter->tx_pool);
634 adapter->tx_pool = NULL;
635 kfree(adapter->tso_pool);
636 adapter->tso_pool = NULL;
637 adapter->num_active_tx_pools = 0;
640 static int init_one_tx_pool(struct net_device *netdev,
641 struct ibmvnic_tx_pool *tx_pool,
642 int num_entries, int buf_size)
644 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
647 tx_pool->tx_buff = kcalloc(num_entries,
648 sizeof(struct ibmvnic_tx_buff),
650 if (!tx_pool->tx_buff)
653 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
654 num_entries * buf_size))
657 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
658 if (!tx_pool->free_map)
661 for (i = 0; i < num_entries; i++)
662 tx_pool->free_map[i] = i;
664 tx_pool->consumer_index = 0;
665 tx_pool->producer_index = 0;
666 tx_pool->num_buffers = num_entries;
667 tx_pool->buf_size = buf_size;
672 static int init_tx_pools(struct net_device *netdev)
674 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
678 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
679 adapter->tx_pool = kcalloc(tx_subcrqs,
680 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
681 if (!adapter->tx_pool)
684 adapter->tso_pool = kcalloc(tx_subcrqs,
685 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
686 if (!adapter->tso_pool)
689 adapter->num_active_tx_pools = tx_subcrqs;
691 for (i = 0; i < tx_subcrqs; i++) {
692 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
693 adapter->req_tx_entries_per_subcrq,
694 adapter->req_mtu + VLAN_HLEN);
696 release_tx_pools(adapter);
700 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
704 release_tx_pools(adapter);
712 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
716 if (adapter->napi_enabled)
719 for (i = 0; i < adapter->req_rx_queues; i++)
720 napi_enable(&adapter->napi[i]);
722 adapter->napi_enabled = true;
725 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
729 if (!adapter->napi_enabled)
732 for (i = 0; i < adapter->req_rx_queues; i++) {
733 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
734 napi_disable(&adapter->napi[i]);
737 adapter->napi_enabled = false;
740 static int init_napi(struct ibmvnic_adapter *adapter)
744 adapter->napi = kcalloc(adapter->req_rx_queues,
745 sizeof(struct napi_struct), GFP_KERNEL);
749 for (i = 0; i < adapter->req_rx_queues; i++) {
750 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
751 netif_napi_add(adapter->netdev, &adapter->napi[i],
752 ibmvnic_poll, NAPI_POLL_WEIGHT);
755 adapter->num_active_rx_napi = adapter->req_rx_queues;
759 static void release_napi(struct ibmvnic_adapter *adapter)
766 for (i = 0; i < adapter->num_active_rx_napi; i++) {
767 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
768 netif_napi_del(&adapter->napi[i]);
771 kfree(adapter->napi);
772 adapter->napi = NULL;
773 adapter->num_active_rx_napi = 0;
774 adapter->napi_enabled = false;
777 static int ibmvnic_login(struct net_device *netdev)
779 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
780 unsigned long timeout = msecs_to_jiffies(30000);
787 if (retry_count > IBMVNIC_MAX_QUEUES) {
788 netdev_warn(netdev, "Login attempts exceeded\n");
792 adapter->init_done_rc = 0;
793 reinit_completion(&adapter->init_done);
794 rc = send_login(adapter);
796 netdev_warn(netdev, "Unable to login\n");
800 if (!wait_for_completion_timeout(&adapter->init_done,
801 timeout)) {
802 netdev_warn(netdev, "Login timed out\n");
806 if (adapter->init_done_rc == PARTIALSUCCESS) {
808 release_sub_crqs(adapter, 1);
812 "Received partial success, retrying...\n");
813 adapter->init_done_rc = 0;
814 reinit_completion(&adapter->init_done);
815 send_cap_queries(adapter);
816 if (!wait_for_completion_timeout(&adapter->init_done,
817 timeout)) {
818 netdev_warn(netdev,
819 "Capabilities query timed out\n");
823 rc = init_sub_crqs(adapter);
826 "SCRQ initialization failed\n");
830 rc = init_sub_crq_irqs(adapter);
833 "SCRQ irq initialization failed\n");
836 } else if (adapter->init_done_rc) {
837 netdev_warn(netdev, "Adapter login failed\n");
842 __ibmvnic_set_mac(netdev, adapter->mac_addr);
847 static void release_login_buffer(struct ibmvnic_adapter *adapter)
849 kfree(adapter->login_buf);
850 adapter->login_buf = NULL;
853 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
855 kfree(adapter->login_rsp_buf);
856 adapter->login_rsp_buf = NULL;
859 static void release_resources(struct ibmvnic_adapter *adapter)
861 release_vpd_data(adapter);
863 release_tx_pools(adapter);
864 release_rx_pools(adapter);
866 release_napi(adapter);
867 release_login_rsp_buffer(adapter);
870 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
872 struct net_device *netdev = adapter->netdev;
873 unsigned long timeout = msecs_to_jiffies(30000);
874 union ibmvnic_crq crq;
878 netdev_dbg(netdev, "setting link state %d\n", link_state);
880 memset(&crq, 0, sizeof(crq));
881 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
882 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
883 crq.logical_link_state.link_state = link_state;
888 reinit_completion(&adapter->init_done);
889 rc = ibmvnic_send_crq(adapter, &crq);
891 netdev_err(netdev, "Failed to set link state\n");
895 if (!wait_for_completion_timeout(&adapter->init_done,
896 timeout)) {
897 netdev_err(netdev, "timeout setting link state\n");
901 if (adapter->init_done_rc == 1) {
902 /* Partial success, delay and re-send */
905 } else if (adapter->init_done_rc) {
906 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
907 adapter->init_done_rc);
908 return adapter->init_done_rc;
915 static int set_real_num_queues(struct net_device *netdev)
917 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
920 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
921 adapter->req_tx_queues, adapter->req_rx_queues);
923 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
925 netdev_err(netdev, "failed to set the number of tx queues\n");
929 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
931 netdev_err(netdev, "failed to set the number of rx queues\n");
936 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
938 struct device *dev = &adapter->vdev->dev;
939 union ibmvnic_crq crq;
943 if (adapter->vpd->buff)
944 len = adapter->vpd->len;
946 init_completion(&adapter->fw_done);
947 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
948 crq.get_vpd_size.cmd = GET_VPD_SIZE;
949 rc = ibmvnic_send_crq(adapter, &crq);
952 wait_for_completion(&adapter->fw_done);
954 if (!adapter->vpd->len)
957 if (!adapter->vpd->buff)
958 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
959 else if (adapter->vpd->len != len)
960 adapter->vpd->buff =
961 krealloc(adapter->vpd->buff,
962 adapter->vpd->len, GFP_KERNEL);
964 if (!adapter->vpd->buff) {
965 dev_err(dev, "Could not allocate VPD buffer\n");
969 adapter->vpd->dma_addr =
970 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
972 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
973 dev_err(dev, "Could not map VPD buffer\n");
974 kfree(adapter->vpd->buff);
975 adapter->vpd->buff = NULL;
979 reinit_completion(&adapter->fw_done);
980 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
981 crq.get_vpd.cmd = GET_VPD;
982 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
983 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
984 rc = ibmvnic_send_crq(adapter, &crq);
986 kfree(adapter->vpd->buff);
987 adapter->vpd->buff = NULL;
990 wait_for_completion(&adapter->fw_done);
995 static int init_resources(struct ibmvnic_adapter *adapter)
997 struct net_device *netdev = adapter->netdev;
1000 rc = set_real_num_queues(netdev);
1004 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1008 /* Vital Product Data (VPD) */
1009 rc = ibmvnic_get_vpd(adapter);
1011 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1015 adapter->map_id = 1;
1017 rc = init_napi(adapter);
1021 send_map_query(adapter);
1023 rc = init_rx_pools(netdev);
1027 rc = init_tx_pools(netdev);
1031 static int __ibmvnic_open(struct net_device *netdev)
1033 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1034 enum vnic_state prev_state = adapter->state;
1037 adapter->state = VNIC_OPENING;
1038 replenish_pools(adapter);
1039 ibmvnic_napi_enable(adapter);
1041 /* We're ready to receive frames, enable the sub-crq interrupts and
1042 * set the logical link state to up
1044 for (i = 0; i < adapter->req_rx_queues; i++) {
1045 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1046 if (prev_state == VNIC_CLOSED)
1047 enable_irq(adapter->rx_scrq[i]->irq);
1048 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1051 for (i = 0; i < adapter->req_tx_queues; i++) {
1052 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1053 if (prev_state == VNIC_CLOSED)
1054 enable_irq(adapter->tx_scrq[i]->irq);
1055 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1058 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1060 for (i = 0; i < adapter->req_rx_queues; i++)
1061 napi_disable(&adapter->napi[i]);
1062 release_resources(adapter);
1066 netif_tx_start_all_queues(netdev);
1068 if (prev_state == VNIC_CLOSED) {
1069 for (i = 0; i < adapter->req_rx_queues; i++)
1070 napi_schedule(&adapter->napi[i]);
1073 adapter->state = VNIC_OPEN;
1077 static int ibmvnic_open(struct net_device *netdev)
1079 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1082 /* If device failover is pending, just set device state and return.
1083 * Device operation will be handled by reset routine.
1085 if (adapter->failover_pending) {
1086 adapter->state = VNIC_OPEN;
1090 if (adapter->state != VNIC_CLOSED) {
1091 rc = ibmvnic_login(netdev);
1095 rc = init_resources(adapter);
1097 netdev_err(netdev, "failed to initialize resources\n");
1098 release_resources(adapter);
1103 rc = __ibmvnic_open(netdev);
1108 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1110 struct ibmvnic_rx_pool *rx_pool;
1111 struct ibmvnic_rx_buff *rx_buff;
1116 if (!adapter->rx_pool)
1119 rx_scrqs = adapter->num_active_rx_pools;
1120 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1122 /* Free any remaining skbs in the rx buffer pools */
1123 for (i = 0; i < rx_scrqs; i++) {
1124 rx_pool = &adapter->rx_pool[i];
1125 if (!rx_pool || !rx_pool->rx_buff)
1128 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1129 for (j = 0; j < rx_entries; j++) {
1130 rx_buff = &rx_pool->rx_buff[j];
1131 if (rx_buff && rx_buff->skb) {
1132 dev_kfree_skb_any(rx_buff->skb);
1133 rx_buff->skb = NULL;
1139 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1140 struct ibmvnic_tx_pool *tx_pool)
1142 struct ibmvnic_tx_buff *tx_buff;
1146 if (!tx_pool || !tx_pool->tx_buff)
1149 tx_entries = tx_pool->num_buffers;
1151 for (i = 0; i < tx_entries; i++) {
1152 tx_buff = &tx_pool->tx_buff[i];
1153 if (tx_buff && tx_buff->skb) {
1154 dev_kfree_skb_any(tx_buff->skb);
1155 tx_buff->skb = NULL;
1160 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1165 if (!adapter->tx_pool || !adapter->tso_pool)
1168 tx_scrqs = adapter->num_active_tx_pools;
1170 /* Free any remaining skbs in the tx buffer pools */
1171 for (i = 0; i < tx_scrqs; i++) {
1172 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1173 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1174 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1178 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1180 struct net_device *netdev = adapter->netdev;
1183 if (adapter->tx_scrq) {
1184 for (i = 0; i < adapter->req_tx_queues; i++)
1185 if (adapter->tx_scrq[i]->irq) {
1187 "Disabling tx_scrq[%d] irq\n", i);
1188 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1189 disable_irq(adapter->tx_scrq[i]->irq);
1193 if (adapter->rx_scrq) {
1194 for (i = 0; i < adapter->req_rx_queues; i++) {
1195 if (adapter->rx_scrq[i]->irq) {
1197 "Disabling rx_scrq[%d] irq\n", i);
1198 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1199 disable_irq(adapter->rx_scrq[i]->irq);
1205 static void ibmvnic_cleanup(struct net_device *netdev)
1207 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1209 /* ensure that transmissions are stopped if called by do_reset */
1210 if (adapter->resetting)
1211 netif_tx_disable(netdev);
1213 netif_tx_stop_all_queues(netdev);
1215 ibmvnic_napi_disable(adapter);
1216 ibmvnic_disable_irqs(adapter);
1218 clean_rx_pools(adapter);
1219 clean_tx_pools(adapter);
1222 static int __ibmvnic_close(struct net_device *netdev)
1224 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1227 adapter->state = VNIC_CLOSING;
1228 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1231 adapter->state = VNIC_CLOSED;
1235 static int ibmvnic_close(struct net_device *netdev)
1237 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1240 /* If device failover is pending, just set device state and return.
1241 * Device operation will be handled by reset routine.
1243 if (adapter->failover_pending) {
1244 adapter->state = VNIC_CLOSED;
1248 rc = __ibmvnic_close(netdev);
1249 ibmvnic_cleanup(netdev);
1255 * build_hdr_data - creates L2/L3/L4 header data buffer
1256 * @hdr_field: bitfield determining needed headers
1257 * @skb: socket buffer
1258 * @hdr_len: array of header lengths to fill in
1259 * @hdr_data: buffer to write the header data into
1261 * Reads hdr_field to determine which headers are needed by firmware.
1262 * Builds a buffer containing these headers. Saves individual header
1263 * lengths and total buffer length to be used to build descriptors.
1265 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1266 int *hdr_len, u8 *hdr_data)
1271 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1272 hdr_len[0] = sizeof(struct vlan_ethhdr);
1274 hdr_len[0] = sizeof(struct ethhdr);
1276 if (skb->protocol == htons(ETH_P_IP)) {
1277 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1278 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1279 hdr_len[2] = tcp_hdrlen(skb);
1280 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1281 hdr_len[2] = sizeof(struct udphdr);
1282 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1283 hdr_len[1] = sizeof(struct ipv6hdr);
1284 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1285 hdr_len[2] = tcp_hdrlen(skb);
1286 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1287 hdr_len[2] = sizeof(struct udphdr);
1288 } else if (skb->protocol == htons(ETH_P_ARP)) {
1289 hdr_len[1] = arp_hdr_len(skb->dev);
1293 memset(hdr_data, 0, 120);
1294 if ((hdr_field >> 6) & 1) {
1295 hdr = skb_mac_header(skb);
1296 memcpy(hdr_data, hdr, hdr_len[0]);
1300 if ((hdr_field >> 5) & 1) {
1301 hdr = skb_network_header(skb);
1302 memcpy(hdr_data + len, hdr, hdr_len[1]);
1306 if ((hdr_field >> 4) & 1) {
1307 hdr = skb_transport_header(skb);
1308 memcpy(hdr_data + len, hdr, hdr_len[2]);
1315 * create_hdr_descs - create header and header extension descriptors
1316 * @hdr_field: bitfield determining needed headers
1317 * @hdr_data: buffer containing header data
1318 * @len: length of data buffer
1319 * @hdr_len: array of individual header lengths
1320 * @scrq_arr: descriptor array
1322 * Creates header and, if needed, header extension descriptors and
1323 * places them in a descriptor array, scrq_arr
1326 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1327 union sub_crq *scrq_arr)
1329 union sub_crq hdr_desc;
1335 while (tmp_len > 0) {
1336 cur = hdr_data + len - tmp_len;
1338 memset(&hdr_desc, 0, sizeof(hdr_desc));
1339 if (cur != hdr_data) {
1340 data = hdr_desc.hdr_ext.data;
1341 tmp = tmp_len > 29 ? 29 : tmp_len;
1342 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1343 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1344 hdr_desc.hdr_ext.len = tmp;
1346 data = hdr_desc.hdr.data;
1347 tmp = tmp_len > 24 ? 24 : tmp_len;
1348 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1349 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1350 hdr_desc.hdr.len = tmp;
1351 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1352 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1353 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1354 hdr_desc.hdr.flag = hdr_field << 1;
1356 memcpy(data, cur, tmp);
1358 *scrq_arr = hdr_desc;
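
/*
 * create_hdr_descs() packs 24 bytes of header data into the first
 * descriptor and 29 bytes into each extension descriptor (the 24/29
 * limits come from the loop above). A stand-alone sketch of the same
 * chunking arithmetic, with invented names:
 */
#if 0
#include <stdio.h>

/* Number of sub-CRQ descriptors needed for @len bytes of header data. */
static int toy_hdr_descs(int len)
{
        int n = 0, first = 1;

        while (len > 0) {
                len -= first ? 24 : 29;
                first = 0;
                n++;
        }
        return n;
}

int main(void)
{
        /* 20 -> 1 desc, 24 -> 1, 25 -> 2, 53 -> 2, 54 -> 3 */
        printf("%d %d %d %d %d\n", toy_hdr_descs(20), toy_hdr_descs(24),
               toy_hdr_descs(25), toy_hdr_descs(53), toy_hdr_descs(54));
        return 0;
}
#endif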
1367 * build_hdr_descs_arr - build a header descriptor array
1368 * @txbuff: tx buffer containing the socket buffer and descriptor array
1369 * @num_entries: pointer to the descriptor count, updated in place
1370 * @hdr_field: bit field determining which headers will be sent
1373 * This function will build a TX descriptor array with applicable
1374 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1377 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1378 int *num_entries, u8 hdr_field)
1380 int hdr_len[3] = {0, 0, 0};
1382 u8 *hdr_data = txbuff->hdr_data;
1384 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1386 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1387 txbuff->indir_arr + 1);
1390 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1391 struct net_device *netdev)
1393 /* For some backing devices, mishandling of small packets
1394 * can result in a loss of connection or TX stall. Device
1395 * architects recommend that no packet should be smaller
1396 * than the minimum MTU value provided to the driver, so
1397 * pad any packets to that length
1399 if (skb->len < netdev->min_mtu)
1400 return skb_put_padto(skb, netdev->min_mtu);
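
/*
 * The workaround above pads runt frames up to the device minimum MTU.
 * Equivalent stand-alone logic (skb_put_padto() additionally grows the
 * skb's data area; here a flat buffer stands in for the skb):
 */
#if 0
#include <string.h>

/* Pad @buf (capacity >= @min_len) with zeroes; return the new length. */
static int toy_pad_to(char *buf, int len, int min_len)
{
        if (len < min_len) {
                memset(buf + len, 0, min_len - len);
                len = min_len;
        }
        return len;
}
#endif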
1405 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1407 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1408 int queue_num = skb_get_queue_mapping(skb);
1409 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1410 struct device *dev = &adapter->vdev->dev;
1411 struct ibmvnic_tx_buff *tx_buff = NULL;
1412 struct ibmvnic_sub_crq_queue *tx_scrq;
1413 struct ibmvnic_tx_pool *tx_pool;
1414 unsigned int tx_send_failed = 0;
1415 unsigned int tx_map_failed = 0;
1416 unsigned int tx_dropped = 0;
1417 unsigned int tx_packets = 0;
1418 unsigned int tx_bytes = 0;
1419 dma_addr_t data_dma_addr;
1420 struct netdev_queue *txq;
1421 unsigned long lpar_rc;
1422 union sub_crq tx_crq;
1423 unsigned int offset;
1424 int num_entries = 1;
1429 netdev_tx_t ret = NETDEV_TX_OK;
1431 if (adapter->resetting) {
1432 if (!netif_subqueue_stopped(netdev, skb))
1433 netif_stop_subqueue(netdev, queue_num);
1434 dev_kfree_skb_any(skb);
1442 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1448 if (skb_is_gso(skb))
1449 tx_pool = &adapter->tso_pool[queue_num];
1451 tx_pool = &adapter->tx_pool[queue_num];
1453 tx_scrq = adapter->tx_scrq[queue_num];
1454 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1455 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1456 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1458 index = tx_pool->free_map[tx_pool->consumer_index];
1460 if (index == IBMVNIC_INVALID_MAP) {
1461 dev_kfree_skb_any(skb);
1468 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1470 offset = index * tx_pool->buf_size;
1471 dst = tx_pool->long_term_buff.buff + offset;
1472 memset(dst, 0, tx_pool->buf_size);
1473 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1475 if (skb_shinfo(skb)->nr_frags) {
1479 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1480 cur = skb_headlen(skb);
1482 /* Copy the frags */
1483 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1484 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1487 page_address(skb_frag_page(frag)) +
1488 frag->page_offset, skb_frag_size(frag));
1489 cur += skb_frag_size(frag);
1492 skb_copy_from_linear_data(skb, dst, skb->len);
1495 tx_pool->consumer_index =
1496 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1498 tx_buff = &tx_pool->tx_buff[index];
1500 tx_buff->data_dma[0] = data_dma_addr;
1501 tx_buff->data_len[0] = skb->len;
1502 tx_buff->index = index;
1503 tx_buff->pool_index = queue_num;
1504 tx_buff->last_frag = true;
1506 memset(&tx_crq, 0, sizeof(tx_crq));
1507 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1508 tx_crq.v1.type = IBMVNIC_TX_DESC;
1509 tx_crq.v1.n_crq_elem = 1;
1510 tx_crq.v1.n_sge = 1;
1511 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1513 if (skb_is_gso(skb))
1514 tx_crq.v1.correlator =
1515 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1517 tx_crq.v1.correlator = cpu_to_be32(index);
1518 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1519 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1520 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1522 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1523 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1524 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1527 if (skb->protocol == htons(ETH_P_IP)) {
1528 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1529 proto = ip_hdr(skb)->protocol;
1530 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1531 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1532 proto = ipv6_hdr(skb)->nexthdr;
1535 if (proto == IPPROTO_TCP)
1536 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1537 else if (proto == IPPROTO_UDP)
1538 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1540 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1541 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1544 if (skb_is_gso(skb)) {
1545 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1546 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1549 /* determine if l2/3/4 headers are sent to firmware */
1550 if ((*hdrs >> 7) & 1) {
1551 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1552 tx_crq.v1.n_crq_elem = num_entries;
1553 tx_buff->num_entries = num_entries;
1554 tx_buff->indir_arr[0] = tx_crq;
1555 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1556 sizeof(tx_buff->indir_arr),
1558 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1559 dev_kfree_skb_any(skb);
1560 tx_buff->skb = NULL;
1561 if (!firmware_has_feature(FW_FEATURE_CMO))
1562 dev_err(dev, "tx: unable to map descriptor array\n");
1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569 (u64)tx_buff->indir_dma,
1571 dma_unmap_single(dev, tx_buff->indir_dma,
1572 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1574 tx_buff->num_entries = num_entries;
1575 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1578 if (lpar_rc != H_SUCCESS) {
1579 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1580 dev_err_ratelimited(dev, "tx: send failed\n");
1581 dev_kfree_skb_any(skb);
1582 tx_buff->skb = NULL;
1584 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1585 /* Disable TX and report carrier off if queue is closed
1586 * or pending failover.
1587 * Firmware guarantees that a signal will be sent to the
1588 * driver, triggering a reset or some other action.
1590 netif_tx_stop_all_queues(netdev);
1591 netif_carrier_off(netdev);
1600 if (atomic_add_return(num_entries, &tx_scrq->used)
1601 >= adapter->req_tx_entries_per_subcrq) {
1602 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1603 netif_stop_subqueue(netdev, queue_num);
1607 tx_bytes += skb->len;
1608 txq->trans_start = jiffies;
1613 /* roll back consumer index and map array*/
1614 if (tx_pool->consumer_index == 0)
1615 tx_pool->consumer_index =
1616 tx_pool->num_buffers - 1;
1618 tx_pool->consumer_index--;
1619 tx_pool->free_map[tx_pool->consumer_index] = index;
1621 netdev->stats.tx_dropped += tx_dropped;
1622 netdev->stats.tx_bytes += tx_bytes;
1623 netdev->stats.tx_packets += tx_packets;
1624 adapter->tx_send_failed += tx_send_failed;
1625 adapter->tx_map_failed += tx_map_failed;
1626 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1627 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1628 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
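
/*
 * ibmvnic_xmit() takes a buffer slot from free_map at consumer_index,
 * marks it invalid, and advances the index modulo the pool size; on a
 * failed send the tx_err path above rolls both back. Stand-alone model
 * of that bookkeeping, with invented names:
 */
#if 0
#define TOY_INVALID (-1)

struct toy_pool { int *free_map; int consumer; int num; };

static int toy_take_slot(struct toy_pool *p)
{
        int index = p->free_map[p->consumer];

        if (index == TOY_INVALID)
                return TOY_INVALID;     /* pool exhausted, drop */
        p->free_map[p->consumer] = TOY_INVALID;
        p->consumer = (p->consumer + 1) % p->num;
        return index;
}

/* Undo toy_take_slot() after a failed send. */
static void toy_put_back(struct toy_pool *p, int index)
{
        p->consumer = (p->consumer == 0) ? p->num - 1 : p->consumer - 1;
        p->free_map[p->consumer] = index;
}
#endif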
1633 static void ibmvnic_set_multi(struct net_device *netdev)
1635 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1636 struct netdev_hw_addr *ha;
1637 union ibmvnic_crq crq;
1639 memset(&crq, 0, sizeof(crq));
1640 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1641 crq.request_capability.cmd = REQUEST_CAPABILITY;
1643 if (netdev->flags & IFF_PROMISC) {
1644 if (!adapter->promisc_supported)
1647 if (netdev->flags & IFF_ALLMULTI) {
1648 /* Accept all multicast */
1649 memset(&crq, 0, sizeof(crq));
1650 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1651 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1652 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1653 ibmvnic_send_crq(adapter, &crq);
1654 } else if (netdev_mc_empty(netdev)) {
1655 /* Reject all multicast */
1656 memset(&crq, 0, sizeof(crq));
1657 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1658 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1659 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1660 ibmvnic_send_crq(adapter, &crq);
1662 /* Accept one or more multicast(s) */
1663 netdev_for_each_mc_addr(ha, netdev) {
1664 memset(&crq, 0, sizeof(crq));
1665 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1666 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1667 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1668 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1670 ibmvnic_send_crq(adapter, &crq);
1676 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1678 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1679 union ibmvnic_crq crq;
1682 if (!is_valid_ether_addr(dev_addr)) {
1683 rc = -EADDRNOTAVAIL;
1687 memset(&crq, 0, sizeof(crq));
1688 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1689 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1690 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1692 init_completion(&adapter->fw_done);
1693 rc = ibmvnic_send_crq(adapter, &crq);
1699 wait_for_completion(&adapter->fw_done);
1700 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1701 if (adapter->fw_done_rc) {
1708 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1712 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1714 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1715 struct sockaddr *addr = p;
1719 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1720 if (adapter->state != VNIC_PROBED)
1721 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1727 * do_reset returns zero if we are able to keep processing reset events, or
1728 * non-zero if we hit a fatal error and must halt.
1730 static int do_reset(struct ibmvnic_adapter *adapter,
1731 struct ibmvnic_rwi *rwi, u32 reset_state)
1733 u64 old_num_rx_queues, old_num_tx_queues;
1734 u64 old_num_rx_slots, old_num_tx_slots;
1735 struct net_device *netdev = adapter->netdev;
1738 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1741 netif_carrier_off(netdev);
1742 adapter->reset_reason = rwi->reset_reason;
1744 old_num_rx_queues = adapter->req_rx_queues;
1745 old_num_tx_queues = adapter->req_tx_queues;
1746 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1747 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1749 ibmvnic_cleanup(netdev);
1751 if (reset_state == VNIC_OPEN &&
1752 adapter->reset_reason != VNIC_RESET_MOBILITY &&
1753 adapter->reset_reason != VNIC_RESET_FAILOVER) {
1754 rc = __ibmvnic_close(netdev);
1759 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1760 adapter->wait_for_reset) {
1761 release_resources(adapter);
1762 release_sub_crqs(adapter, 1);
1763 release_crq_queue(adapter);
1766 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1767 /* remove the closed state so when we call open it appears
1768 * we are coming from the probed state.
1770 adapter->state = VNIC_PROBED;
1772 if (adapter->wait_for_reset) {
1773 rc = init_crq_queue(adapter);
1774 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1775 rc = ibmvnic_reenable_crq_queue(adapter);
1776 release_sub_crqs(adapter, 1);
1778 rc = ibmvnic_reset_crq(adapter);
1779 if (rc == H_CLOSED || rc == H_SUCCESS)
1780 rc = vio_enable_interrupts(adapter->vdev);
1784 netdev_err(adapter->netdev,
1785 "Couldn't initialize crq. rc=%d\n", rc);
1789 rc = ibmvnic_reset_init(adapter);
1790 if (rc)
1791 return IBMVNIC_INIT_FAILED;
1793 /* If the adapter was in PROBE state prior to the reset,
1794 * exit here.
1795 */
1796 if (reset_state == VNIC_PROBED)
1797 return 0;
1799 rc = ibmvnic_login(netdev);
1801 adapter->state = reset_state;
1805 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1806 adapter->wait_for_reset) {
1807 rc = init_resources(adapter);
1810 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1811 adapter->req_tx_queues != old_num_tx_queues ||
1812 adapter->req_rx_add_entries_per_subcrq !=
1814 adapter->req_tx_entries_per_subcrq !=
1816 release_rx_pools(adapter);
1817 release_tx_pools(adapter);
1818 release_napi(adapter);
1819 release_vpd_data(adapter);
1821 rc = init_resources(adapter);
1826 rc = reset_tx_pools(adapter);
1830 rc = reset_rx_pools(adapter);
1834 ibmvnic_disable_irqs(adapter);
1836 adapter->state = VNIC_CLOSED;
1838 if (reset_state == VNIC_CLOSED)
1841 rc = __ibmvnic_open(netdev);
1843 if (list_empty(&adapter->rwi_list))
1844 adapter->state = VNIC_CLOSED;
1846 adapter->state = reset_state;
1851 /* refresh device's multicast list */
1852 ibmvnic_set_multi(netdev);
1855 for (i = 0; i < adapter->req_rx_queues; i++)
1856 napi_schedule(&adapter->napi[i]);
1858 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1859 adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1860 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
1865 static int do_hard_reset(struct ibmvnic_adapter *adapter,
1866 struct ibmvnic_rwi *rwi, u32 reset_state)
1868 struct net_device *netdev = adapter->netdev;
1871 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1874 netif_carrier_off(netdev);
1875 adapter->reset_reason = rwi->reset_reason;
1877 ibmvnic_cleanup(netdev);
1878 release_resources(adapter);
1879 release_sub_crqs(adapter, 0);
1880 release_crq_queue(adapter);
1882 /* remove the closed state so when we call open it appears
1883 * we are coming from the probed state.
1885 adapter->state = VNIC_PROBED;
1887 reinit_completion(&adapter->init_done);
1888 rc = init_crq_queue(adapter);
1890 netdev_err(adapter->netdev,
1891 "Couldn't initialize crq. rc=%d\n", rc);
1895 rc = ibmvnic_init(adapter);
1899 /* If the adapter was in PROBE state prior to the reset,
1900 * exit here.
1901 */
1902 if (reset_state == VNIC_PROBED)
1903 return 0;
1905 rc = ibmvnic_login(netdev);
1907 adapter->state = VNIC_PROBED;
1911 rc = init_resources(adapter);
1915 ibmvnic_disable_irqs(adapter);
1916 adapter->state = VNIC_CLOSED;
1918 if (reset_state == VNIC_CLOSED)
1921 rc = __ibmvnic_open(netdev);
1923 if (list_empty(&adapter->rwi_list))
1924 adapter->state = VNIC_CLOSED;
1926 adapter->state = reset_state;
1934 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1936 struct ibmvnic_rwi *rwi;
1937 unsigned long flags;
1939 spin_lock_irqsave(&adapter->rwi_lock, flags);
1941 if (!list_empty(&adapter->rwi_list)) {
1942 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1944 list_del(&rwi->list);
1949 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
1953 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1955 struct ibmvnic_rwi *rwi;
1957 rwi = get_next_rwi(adapter);
1958 while (rwi) {
1959 kfree(rwi);
1960 rwi = get_next_rwi(adapter);
1964 static void __ibmvnic_reset(struct work_struct *work)
1966 struct ibmvnic_rwi *rwi;
1967 struct ibmvnic_adapter *adapter;
1968 bool we_lock_rtnl = false;
1972 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1974 /* netif_set_real_num_xx_queues needs to take rtnl lock here
1975 * unless wait_for_reset is set, in which case the rtnl lock
1976 * has already been taken before initializing the reset
1978 if (!adapter->wait_for_reset) {
1980 we_lock_rtnl = true;
1982 reset_state = adapter->state;
1984 rwi = get_next_rwi(adapter);
1986 if (adapter->force_reset_recovery) {
1987 adapter->force_reset_recovery = false;
1988 rc = do_hard_reset(adapter, rwi, reset_state);
1990 rc = do_reset(adapter, rwi, reset_state);
1993 if (rc && rc != IBMVNIC_INIT_FAILED &&
1994 !adapter->force_reset_recovery)
1997 rwi = get_next_rwi(adapter);
2000 if (adapter->wait_for_reset) {
2001 adapter->wait_for_reset = false;
2002 adapter->reset_done_rc = rc;
2003 complete(&adapter->reset_done);
2007 netdev_dbg(adapter->netdev, "Reset failed\n");
2008 free_all_rwi(adapter);
2011 adapter->resetting = false;
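
/*
 * __ibmvnic_reset() above drains the rwi (reset work item) list one
 * entry at a time, escalating to do_hard_reset() when
 * force_reset_recovery is set and stopping when do_reset() reports a
 * fatal rc. A schematic of that drain loop over a toy singly linked
 * queue (invented names, no locking shown):
 */
#if 0
#include <stdlib.h>

struct toy_rwi { int reason; struct toy_rwi *next; };

static struct toy_rwi *toy_get_next(struct toy_rwi **head)
{
        struct toy_rwi *rwi = *head;

        if (rwi)
                *head = rwi->next;      /* list_del() under the lock */
        return rwi;
}

static int toy_drain(struct toy_rwi **head, int (*do_reset)(int reason))
{
        struct toy_rwi *rwi = toy_get_next(head);
        int rc = 0;

        while (rwi) {
                rc = do_reset(rwi->reason);
                free(rwi);
                if (rc)                 /* fatal: stop processing */
                        break;
                rwi = toy_get_next(head);
        }
        return rc;
}
#endif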
2016 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2017 enum ibmvnic_reset_reason reason)
2019 struct list_head *entry, *tmp_entry;
2020 struct ibmvnic_rwi *rwi, *tmp;
2021 struct net_device *netdev = adapter->netdev;
2022 unsigned long flags;
2025 if (adapter->state == VNIC_REMOVING ||
2026 adapter->state == VNIC_REMOVED ||
2027 adapter->failover_pending) {
2029 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2033 if (adapter->state == VNIC_PROBING) {
2034 netdev_warn(netdev, "Adapter reset during probe\n");
2035 ret = adapter->init_done_rc = EAGAIN;
2039 spin_lock_irqsave(&adapter->rwi_lock, flags);
2041 list_for_each(entry, &adapter->rwi_list) {
2042 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2043 if (tmp->reset_reason == reason) {
2044 netdev_dbg(netdev, "Skipping matching reset\n");
2045 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2051 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2053 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2054 ibmvnic_close(netdev);
2058 /* if we just received a transport event,
2059 * flush reset queue and process this reset
2061 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2062 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2065 rwi->reset_reason = reason;
2066 list_add_tail(&rwi->list, &adapter->rwi_list);
2067 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2068 adapter->resetting = true;
2069 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2070 schedule_work(&adapter->ibmvnic_reset);
2074 if (adapter->wait_for_reset)
2075 adapter->wait_for_reset = false;
2079 static void ibmvnic_tx_timeout(struct net_device *dev)
2081 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2083 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2086 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2087 struct ibmvnic_rx_buff *rx_buff)
2089 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2091 rx_buff->skb = NULL;
2093 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2094 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2096 atomic_dec(&pool->available);
2099 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2101 struct net_device *netdev = napi->dev;
2102 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2103 int scrq_num = (int)(napi - adapter->napi);
2104 int frames_processed = 0;
2107 while (frames_processed < budget) {
2108 struct sk_buff *skb;
2109 struct ibmvnic_rx_buff *rx_buff;
2110 union sub_crq *next;
2115 if (unlikely(adapter->resetting &&
2116 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2117 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2118 napi_complete_done(napi, frames_processed);
2119 return frames_processed;
2122 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2124 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2125 rx_buff =
2126 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2127 rx_comp.correlator);
2128 /* do error checking */
2129 if (next->rx_comp.rc) {
2130 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2131 be16_to_cpu(next->rx_comp.rc));
2132 /* free the entry */
2133 next->rx_comp.first = 0;
2134 dev_kfree_skb_any(rx_buff->skb);
2135 remove_buff_from_pool(adapter, rx_buff);
2137 } else if (!rx_buff->skb) {
2138 /* free the entry */
2139 next->rx_comp.first = 0;
2140 remove_buff_from_pool(adapter, rx_buff);
2144 length = be32_to_cpu(next->rx_comp.len);
2145 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2146 flags = next->rx_comp.flags;
2148 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2151 /* VLAN Header has been stripped by the system firmware and
2152 * needs to be inserted by the driver
2154 if (adapter->rx_vlan_header_insertion &&
2155 (flags & IBMVNIC_VLAN_STRIPPED))
2156 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2157 ntohs(next->rx_comp.vlan_tci));
2159 /* free the entry */
2160 next->rx_comp.first = 0;
2161 remove_buff_from_pool(adapter, rx_buff);
2163 skb_put(skb, length);
2164 skb->protocol = eth_type_trans(skb, netdev);
2165 skb_record_rx_queue(skb, scrq_num);
2167 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2168 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2169 skb->ip_summed = CHECKSUM_UNNECESSARY;
2173 napi_gro_receive(napi, skb); /* send it up */
2174 netdev->stats.rx_packets++;
2175 netdev->stats.rx_bytes += length;
2176 adapter->rx_stats_buffers[scrq_num].packets++;
2177 adapter->rx_stats_buffers[scrq_num].bytes += length;
2181 if (adapter->state != VNIC_CLOSING)
2182 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2184 if (frames_processed < budget) {
2185 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2186 napi_complete_done(napi, frames_processed);
2187 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2188 napi_reschedule(napi)) {
2189 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2193 return frames_processed;
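
/*
 * ibmvnic_poll() follows the standard NAPI contract visible above:
 * consume at most @budget completions; if the budget was not
 * exhausted, report done and re-enable the queue interrupt, then
 * re-check for work that raced in and re-schedule with the interrupt
 * disabled again. A stand-alone model of that re-arm/re-check dance
 * (toy queue, invented names):
 */
#if 0
struct toy_q { int pending; int irq_on; };

static int toy_poll(struct toy_q *q, int budget)
{
        int done = 0;

        while (done < budget && q->pending) {
                q->pending--;           /* process one completion */
                done++;
        }
        if (done < budget) {
                q->irq_on = 1;          /* enable_scrq_irq() */
                if (q->pending)         /* late arrival: poll again */
                        q->irq_on = 0;  /* disable_scrq_irq() */
        }
        return done;
}
#endif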
2196 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2200 adapter->fallback.mtu = adapter->req_mtu;
2201 adapter->fallback.rx_queues = adapter->req_rx_queues;
2202 adapter->fallback.tx_queues = adapter->req_tx_queues;
2203 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2204 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2206 init_completion(&adapter->reset_done);
2207 adapter->wait_for_reset = true;
2208 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2211 wait_for_completion(&adapter->reset_done);
2214 if (adapter->reset_done_rc) {
2216 adapter->desired.mtu = adapter->fallback.mtu;
2217 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2218 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2219 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2220 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2222 init_completion(&adapter->reset_done);
2223 adapter->wait_for_reset = true;
2224 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2227 wait_for_completion(&adapter->reset_done);
2229 adapter->wait_for_reset = false;
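
/*
 * wait_for_reset() above snapshots the currently granted parameters
 * before requesting new ones; if the CHANGE_PARAM reset fails, the
 * snapshot is written back into adapter->desired and a second reset
 * restores the old configuration. The same try/fallback shape in
 * stand-alone form (invented names):
 */
#if 0
struct toy_params { int mtu; int rx_queues; int tx_queues; };

static int toy_change_params(struct toy_params *cur,
                             const struct toy_params *want,
                             int (*apply)(const struct toy_params *))
{
        struct toy_params fallback = *cur;      /* snapshot first */
        int rc = apply(want);

        if (rc)
                apply(&fallback);       /* best-effort restore */
        else
                *cur = *want;
        return rc;
}
#endif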
2234 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2236 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2238 adapter->desired.mtu = new_mtu + ETH_HLEN;
2240 return wait_for_reset(adapter);
2243 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2244 struct net_device *dev,
2245 netdev_features_t features)
2247 /* Some backing hardware adapters cannot
2248 * handle packets with an MSS less than 224
2249 * or with only one segment.
2251 if (skb_is_gso(skb)) {
2252 if (skb_shinfo(skb)->gso_size < 224 ||
2253 skb_shinfo(skb)->gso_segs == 1)
2254 features &= ~NETIF_F_GSO_MASK;
2260 static const struct net_device_ops ibmvnic_netdev_ops = {
2261 .ndo_open = ibmvnic_open,
2262 .ndo_stop = ibmvnic_close,
2263 .ndo_start_xmit = ibmvnic_xmit,
2264 .ndo_set_rx_mode = ibmvnic_set_multi,
2265 .ndo_set_mac_address = ibmvnic_set_mac,
2266 .ndo_validate_addr = eth_validate_addr,
2267 .ndo_tx_timeout = ibmvnic_tx_timeout,
2268 .ndo_change_mtu = ibmvnic_change_mtu,
2269 .ndo_features_check = ibmvnic_features_check,
2272 /* ethtool functions */
2274 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2275 struct ethtool_link_ksettings *cmd)
2277 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2280 rc = send_query_phys_parms(adapter);
2282 adapter->speed = SPEED_UNKNOWN;
2283 adapter->duplex = DUPLEX_UNKNOWN;
2285 cmd->base.speed = adapter->speed;
2286 cmd->base.duplex = adapter->duplex;
2287 cmd->base.port = PORT_FIBRE;
2288 cmd->base.phy_address = 0;
2289 cmd->base.autoneg = AUTONEG_ENABLE;
2294 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2295 struct ethtool_drvinfo *info)
2297 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2299 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2300 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2301 strlcpy(info->fw_version, adapter->fw_version,
2302 sizeof(info->fw_version));
2305 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2307 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2309 return adapter->msg_enable;
2312 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2314 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2316 adapter->msg_enable = data;
2319 static u32 ibmvnic_get_link(struct net_device *netdev)
2321 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2323 /* Don't need to send a query because we request a logical link up at
2324 * init and then we wait for link state indications
2326 return adapter->logical_link_state;
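/* The maximums reported by the get_ringparam/get_channels callbacks
 * below depend on the IBMVNIC_USE_SERVER_MAXES private flag: when it is
 * set, the server-advertised maximums are reported; otherwise the
 * driver-defined IBMVNIC_MAX_QUEUE_SZ and IBMVNIC_MAX_QUEUES limits are
 * used.
 */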
2329 static void ibmvnic_get_ringparam(struct net_device *netdev,
2330 struct ethtool_ringparam *ring)
2332 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2334 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2335 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2336 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2338 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2339 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2341 ring->rx_mini_max_pending = 0;
2342 ring->rx_jumbo_max_pending = 0;
2343 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2344 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2345 ring->rx_mini_pending = 0;
2346 ring->rx_jumbo_pending = 0;
2349 static int ibmvnic_set_ringparam(struct net_device *netdev,
2350 struct ethtool_ringparam *ring)
2352 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2356 adapter->desired.rx_entries = ring->rx_pending;
2357 adapter->desired.tx_entries = ring->tx_pending;
2359 ret = wait_for_reset(adapter);
2362 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2363 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2365 "Could not match full ring size request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2366 ring->rx_pending, ring->tx_pending,
2367 adapter->req_rx_add_entries_per_subcrq,
2368 adapter->req_tx_entries_per_subcrq);
2372 static void ibmvnic_get_channels(struct net_device *netdev,
2373 struct ethtool_channels *channels)
2375 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2377 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2378 channels->max_rx = adapter->max_rx_queues;
2379 channels->max_tx = adapter->max_tx_queues;
2381 channels->max_rx = IBMVNIC_MAX_QUEUES;
2382 channels->max_tx = IBMVNIC_MAX_QUEUES;
2385 channels->max_other = 0;
2386 channels->max_combined = 0;
2387 channels->rx_count = adapter->req_rx_queues;
2388 channels->tx_count = adapter->req_tx_queues;
2389 channels->other_count = 0;
2390 channels->combined_count = 0;
2393 static int ibmvnic_set_channels(struct net_device *netdev,
2394 struct ethtool_channels *channels)
2396 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2400 adapter->desired.rx_queues = channels->rx_count;
2401 adapter->desired.tx_queues = channels->tx_count;
2403 ret = wait_for_reset(adapter);
2406 (adapter->req_rx_queues != channels->rx_count ||
2407 adapter->req_tx_queues != channels->tx_count))
2409 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2410 channels->rx_count, channels->tx_count,
2411 adapter->req_rx_queues, adapter->req_tx_queues);
2416 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2418 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2421 switch (stringset) {
2423 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2424 i++, data += ETH_GSTRING_LEN)
2425 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2427 for (i = 0; i < adapter->req_tx_queues; i++) {
2428 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2429 data += ETH_GSTRING_LEN;
2431 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2432 data += ETH_GSTRING_LEN;
2434 snprintf(data, ETH_GSTRING_LEN,
2435 "tx%d_dropped_packets", i);
2436 data += ETH_GSTRING_LEN;
2439 for (i = 0; i < adapter->req_rx_queues; i++) {
2440 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2441 data += ETH_GSTRING_LEN;
2443 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2444 data += ETH_GSTRING_LEN;
2446 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2447 data += ETH_GSTRING_LEN;
2451 case ETH_SS_PRIV_FLAGS:
2452 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2453 strcpy(data + i * ETH_GSTRING_LEN,
2454 ibmvnic_priv_flags[i]);
2461 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2463 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2467 return ARRAY_SIZE(ibmvnic_stats) +
2468 adapter->req_tx_queues * NUM_TX_STATS +
2469 adapter->req_rx_queues * NUM_RX_STATS;
2470 case ETH_SS_PRIV_FLAGS:
2471 return ARRAY_SIZE(ibmvnic_priv_flags);
2477 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2478 struct ethtool_stats *stats, u64 *data)
2480 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2481 union ibmvnic_crq crq;
2485 memset(&crq, 0, sizeof(crq));
2486 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2487 crq.request_statistics.cmd = REQUEST_STATISTICS;
2488 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2489 crq.request_statistics.len =
2490 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2492 /* Wait for data to be written */
2493 init_completion(&adapter->stats_done);
2494 rc = ibmvnic_send_crq(adapter, &crq);
2497 wait_for_completion(&adapter->stats_done);
2499 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2500 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2501 ibmvnic_stats[i].offset));
2503 for (j = 0; j < adapter->req_tx_queues; j++) {
2504 data[i] = adapter->tx_stats_buffers[j].packets;
2506 data[i] = adapter->tx_stats_buffers[j].bytes;
2508 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2512 for (j = 0; j < adapter->req_rx_queues; j++) {
2513 data[i] = adapter->rx_stats_buffers[j].packets;
2515 data[i] = adapter->rx_stats_buffers[j].bytes;
2517 data[i] = adapter->rx_stats_buffers[j].interrupts;
2522 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2524 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2526 return adapter->priv_flags;
2529 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2531 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2532 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2535 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2537 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2541 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2542 .get_drvinfo = ibmvnic_get_drvinfo,
2543 .get_msglevel = ibmvnic_get_msglevel,
2544 .set_msglevel = ibmvnic_set_msglevel,
2545 .get_link = ibmvnic_get_link,
2546 .get_ringparam = ibmvnic_get_ringparam,
2547 .set_ringparam = ibmvnic_set_ringparam,
2548 .get_channels = ibmvnic_get_channels,
2549 .set_channels = ibmvnic_set_channels,
2550 .get_strings = ibmvnic_get_strings,
2551 .get_sset_count = ibmvnic_get_sset_count,
2552 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2553 .get_link_ksettings = ibmvnic_get_link_ksettings,
2554 .get_priv_flags = ibmvnic_get_priv_flags,
2555 .set_priv_flags = ibmvnic_set_priv_flags,
2558 /* Routines for managing CRQs/sCRQs */
2560 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2561 struct ibmvnic_sub_crq_queue *scrq)
2566 free_irq(scrq->irq, scrq);
2567 irq_dispose_mapping(scrq->irq);
2571 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2572 atomic_set(&scrq->used, 0);
2575 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2576 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
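/* Reset every TX and RX sub-CRQ in place: reset_one_sub_crq_queue()
 * releases the queue's IRQ, zeroes its message area, and re-registers
 * the same pages with the hypervisor via h_reg_sub_crq().
 */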
2580 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2584 for (i = 0; i < adapter->req_tx_queues; i++) {
2585 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2586 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2591 for (i = 0; i < adapter->req_rx_queues; i++) {
2592 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2593 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2601 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2602 struct ibmvnic_sub_crq_queue *scrq,
2605 struct device *dev = &adapter->vdev->dev;
2608 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2611 /* Close the sub-CRQ */
2613 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2614 adapter->vdev->unit_address,
2616 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2619 netdev_err(adapter->netdev,
2620 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2625 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2627 free_pages((unsigned long)scrq->msgs, 2);
2631 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2634 struct device *dev = &adapter->vdev->dev;
2635 struct ibmvnic_sub_crq_queue *scrq;
2638 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2643 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2645 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2646 goto zero_page_failed;
2649 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2651 if (dma_mapping_error(dev, scrq->msg_token)) {
2652 dev_warn(dev, "Couldn't map crq queue messages page\n");
2656 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2657 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2659 if (rc == H_RESOURCE)
2660 rc = ibmvnic_reset_crq(adapter);
2662 if (rc == H_CLOSED) {
2663 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2665 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2669 scrq->adapter = adapter;
2670 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2671 spin_lock_init(&scrq->lock);
2673 netdev_dbg(adapter->netdev,
2674 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2675 scrq->crq_num, scrq->hw_irq, scrq->irq);
2680 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2683 free_pages((unsigned long)scrq->msgs, 2);
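/* release_sub_crqs - tear down every TX and RX sub-CRQ: any registered
 * IRQ handler and mapping is released first, then each queue is handed
 * to release_sub_crq_queue(), which frees it at the hypervisor
 * (H_FREE_SUB_CRQ) and releases its DMA mapping and message pages.
 */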
2690 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2694 if (adapter->tx_scrq) {
2695 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2696 if (!adapter->tx_scrq[i])
2699 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2701 if (adapter->tx_scrq[i]->irq) {
2702 free_irq(adapter->tx_scrq[i]->irq,
2703 adapter->tx_scrq[i]);
2704 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2705 adapter->tx_scrq[i]->irq = 0;
2708 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2712 kfree(adapter->tx_scrq);
2713 adapter->tx_scrq = NULL;
2714 adapter->num_active_tx_scrqs = 0;
2717 if (adapter->rx_scrq) {
2718 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2719 if (!adapter->rx_scrq[i])
2722 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2724 if (adapter->rx_scrq[i]->irq) {
2725 free_irq(adapter->rx_scrq[i]->irq,
2726 adapter->rx_scrq[i]);
2727 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2728 adapter->rx_scrq[i]->irq = 0;
2731 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2735 kfree(adapter->rx_scrq);
2736 adapter->rx_scrq = NULL;
2737 adapter->num_active_rx_scrqs = 0;
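/* Interrupt enable/disable for a sub-CRQ goes through the H_VIOCTL
 * hypervisor call (H_DISABLE_VIO_INTERRUPT / H_ENABLE_VIO_INTERRUPT)
 * using the queue's hw_irq cookie rather than the Linux IRQ number.
 */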
2741 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2742 struct ibmvnic_sub_crq_queue *scrq)
2744 struct device *dev = &adapter->vdev->dev;
2747 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2748 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2750 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2755 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2756 struct ibmvnic_sub_crq_queue *scrq)
2758 struct device *dev = &adapter->vdev->dev;
2761 if (scrq->hw_irq > 0x100000000ULL) {
2762 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2766 if (adapter->resetting &&
2767 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2768 u64 val = (0xff000000) | scrq->hw_irq;
2770 rc = plpar_hcall_norets(H_EOI, val);
2772 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2776 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2777 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2779 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
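/* ibmvnic_complete_tx - process TX completions on a sub-CRQ.
 *
 * Each completion carries a correlator that encodes the buffer index,
 * with IBMVNIC_TSO_POOL_MASK distinguishing the TSO pool from the
 * regular TX pool. Completed buffers are unmapped, their skbs freed on
 * the last fragment, and the index returned to the pool's free map.
 * A stopped subqueue is woken once its in-flight entries drop to at
 * most half of req_tx_entries_per_subcrq.
 */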
2784 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2785 struct ibmvnic_sub_crq_queue *scrq)
2787 struct device *dev = &adapter->vdev->dev;
2788 struct ibmvnic_tx_pool *tx_pool;
2789 struct ibmvnic_tx_buff *txbuff;
2790 union sub_crq *next;
2795 while (pending_scrq(adapter, scrq)) {
2796 unsigned int pool = scrq->pool_index;
2797 int num_entries = 0;
2799 next = ibmvnic_next_scrq(adapter, scrq);
2800 for (i = 0; i < next->tx_comp.num_comps; i++) {
2801 if (next->tx_comp.rcs[i]) {
2802 dev_err(dev, "tx error %x\n",
2803 next->tx_comp.rcs[i]);
2806 index = be32_to_cpu(next->tx_comp.correlators[i]);
2807 if (index & IBMVNIC_TSO_POOL_MASK) {
2808 tx_pool = &adapter->tso_pool[pool];
2809 index &= ~IBMVNIC_TSO_POOL_MASK;
2811 tx_pool = &adapter->tx_pool[pool];
2814 txbuff = &tx_pool->tx_buff[index];
2816 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2817 if (!txbuff->data_dma[j])
2820 txbuff->data_dma[j] = 0;
2823 if (txbuff->last_frag) {
2824 dev_kfree_skb_any(txbuff->skb);
2828 num_entries += txbuff->num_entries;
2830 tx_pool->free_map[tx_pool->producer_index] = index;
2831 tx_pool->producer_index =
2832 (tx_pool->producer_index + 1) %
2833 tx_pool->num_buffers;
2835 /* mark this tx_comp descriptor as consumed */
2836 next->tx_comp.first = 0;
2838 if (atomic_sub_return(num_entries, &scrq->used) <=
2839 (adapter->req_tx_entries_per_subcrq / 2) &&
2840 __netif_subqueue_stopped(adapter->netdev,
2841 scrq->pool_index)) {
2842 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2843 netdev_dbg(adapter->netdev, "Started queue %d\n",
2848 enable_scrq_irq(adapter, scrq);
2850 if (pending_scrq(adapter, scrq)) {
2851 disable_scrq_irq(adapter, scrq);
2858 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2860 struct ibmvnic_sub_crq_queue *scrq = instance;
2861 struct ibmvnic_adapter *adapter = scrq->adapter;
2863 disable_scrq_irq(adapter, scrq);
2864 ibmvnic_complete_tx(adapter, scrq);
2869 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2871 struct ibmvnic_sub_crq_queue *scrq = instance;
2872 struct ibmvnic_adapter *adapter = scrq->adapter;
2874 /* When booting a kdump kernel we can hit pending interrupts
2875 * prior to completing driver initialization.
2877 if (unlikely(adapter->state != VNIC_OPEN))
2880 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2882 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2883 disable_scrq_irq(adapter, scrq);
2884 __napi_schedule(&adapter->napi[scrq->scrq_num]);
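/* Allocate Linux IRQ mappings for every TX and RX sub-CRQ and attach
 * the ibmvnic_interrupt_tx/rx handlers. On failure, all IRQs mapped so
 * far are released and the sub-CRQs themselves are freed.
 */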
2890 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2892 struct device *dev = &adapter->vdev->dev;
2893 struct ibmvnic_sub_crq_queue *scrq;
2897 for (i = 0; i < adapter->req_tx_queues; i++) {
2898 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2900 scrq = adapter->tx_scrq[i];
2901 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2905 dev_err(dev, "Error mapping irq\n");
2906 goto req_tx_irq_failed;
2909 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
2910 adapter->vdev->unit_address, i);
2911 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2912 0, scrq->name, scrq);
2915 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2917 irq_dispose_mapping(scrq->irq);
2918 goto req_tx_irq_failed;
2922 for (i = 0; i < adapter->req_rx_queues; i++) {
2923 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2925 scrq = adapter->rx_scrq[i];
2926 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2929 dev_err(dev, "Error mapping irq\n");
2930 goto req_rx_irq_failed;
2932 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
2933 adapter->vdev->unit_address, i);
2934 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2935 0, scrq->name, scrq);
2937 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2939 irq_dispose_mapping(scrq->irq);
2940 goto req_rx_irq_failed;
2946 for (j = 0; j < i; j++) {
2947 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2948 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2950 i = adapter->req_tx_queues;
2952 for (j = 0; j < i; j++) {
2953 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2954 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2956 release_sub_crqs(adapter, 1);
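/* init_sub_crqs - allocate and register all sub-CRQs with the
 * hypervisor. All requested TX + RX queues are allocated from a single
 * array first; if fewer than the required minimum register, the request
 * fails. Otherwise the shortfall is absorbed by trimming req_rx_queues
 * and req_tx_queues (never below their minimums) before the queues are
 * split into the tx_scrq[] and rx_scrq[] arrays.
 */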
2960 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2962 struct device *dev = &adapter->vdev->dev;
2963 struct ibmvnic_sub_crq_queue **allqueues;
2964 int registered_queues = 0;
2969 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2971 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2975 for (i = 0; i < total_queues; i++) {
2976 allqueues[i] = init_sub_crq_queue(adapter);
2977 if (!allqueues[i]) {
2978 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2981 registered_queues++;
2984 /* Make sure we were able to register the minimum number of queues */
2985 if (registered_queues <
2986 adapter->min_tx_queues + adapter->min_rx_queues) {
2987 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2991 /* Spread the allocation shortfall across the requested TX/RX queue counts */
2992 for (i = 0; i < total_queues - registered_queues + more; i++) {
2993 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2996 if (adapter->req_rx_queues > adapter->min_rx_queues)
2997 adapter->req_rx_queues--;
3002 if (adapter->req_tx_queues > adapter->min_tx_queues)
3003 adapter->req_tx_queues--;
3010 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3011 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3012 if (!adapter->tx_scrq)
3015 for (i = 0; i < adapter->req_tx_queues; i++) {
3016 adapter->tx_scrq[i] = allqueues[i];
3017 adapter->tx_scrq[i]->pool_index = i;
3018 adapter->num_active_tx_scrqs++;
3021 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3022 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3023 if (!adapter->rx_scrq)
3026 for (i = 0; i < adapter->req_rx_queues; i++) {
3027 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3028 adapter->rx_scrq[i]->scrq_num = i;
3029 adapter->num_active_rx_scrqs++;
3036 kfree(adapter->tx_scrq);
3037 adapter->tx_scrq = NULL;
3039 for (i = 0; i < registered_queues; i++)
3040 release_sub_crq_queue(adapter, allqueues[i], 1);
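/* Build and send a REQUEST_CAPABILITY CRQ for every negotiated value
 * (queue counts, per-queue entries, MTU, and promiscuous mode). The
 * desired.* values set via ethtool or an MTU change take precedence
 * over the server-advertised defaults, and entry counts are clamped so
 * a single long term buffer never exceeds IBMVNIC_MAX_LTB_SIZE.
 */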
3045 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3047 struct device *dev = &adapter->vdev->dev;
3048 union ibmvnic_crq crq;
3052 /* Sub-CRQ entries are 32 bytes long */
3053 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3055 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3056 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3057 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3061 if (adapter->desired.mtu)
3062 adapter->req_mtu = adapter->desired.mtu;
3064 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3066 if (!adapter->desired.tx_entries)
3067 adapter->desired.tx_entries =
3068 adapter->max_tx_entries_per_subcrq;
3069 if (!adapter->desired.rx_entries)
3070 adapter->desired.rx_entries =
3071 adapter->max_rx_add_entries_per_subcrq;
3073 max_entries = IBMVNIC_MAX_LTB_SIZE /
3074 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3076 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3077 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3078 adapter->desired.tx_entries = max_entries;
3081 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3082 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3083 adapter->desired.rx_entries = max_entries;
3086 if (adapter->desired.tx_entries)
3087 adapter->req_tx_entries_per_subcrq =
3088 adapter->desired.tx_entries;
3090 adapter->req_tx_entries_per_subcrq =
3091 adapter->max_tx_entries_per_subcrq;
3093 if (adapter->desired.rx_entries)
3094 adapter->req_rx_add_entries_per_subcrq =
3095 adapter->desired.rx_entries;
3097 adapter->req_rx_add_entries_per_subcrq =
3098 adapter->max_rx_add_entries_per_subcrq;
3100 if (adapter->desired.tx_queues)
3101 adapter->req_tx_queues =
3102 adapter->desired.tx_queues;
3104 adapter->req_tx_queues =
3105 adapter->opt_tx_comp_sub_queues;
3107 if (adapter->desired.rx_queues)
3108 adapter->req_rx_queues =
3109 adapter->desired.rx_queues;
3111 adapter->req_rx_queues =
3112 adapter->opt_rx_comp_queues;
3114 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3117 memset(&crq, 0, sizeof(crq));
3118 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3119 crq.request_capability.cmd = REQUEST_CAPABILITY;
3121 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3122 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3123 atomic_inc(&adapter->running_cap_crqs);
3124 ibmvnic_send_crq(adapter, &crq);
3126 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3127 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3128 atomic_inc(&adapter->running_cap_crqs);
3129 ibmvnic_send_crq(adapter, &crq);
3131 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3132 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3133 atomic_inc(&adapter->running_cap_crqs);
3134 ibmvnic_send_crq(adapter, &crq);
3136 crq.request_capability.capability =
3137 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3138 crq.request_capability.number =
3139 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3140 atomic_inc(&adapter->running_cap_crqs);
3141 ibmvnic_send_crq(adapter, &crq);
3143 crq.request_capability.capability =
3144 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3145 crq.request_capability.number =
3146 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3147 atomic_inc(&adapter->running_cap_crqs);
3148 ibmvnic_send_crq(adapter, &crq);
3150 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3151 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3152 atomic_inc(&adapter->running_cap_crqs);
3153 ibmvnic_send_crq(adapter, &crq);
3155 if (adapter->netdev->flags & IFF_PROMISC) {
3156 if (adapter->promisc_supported) {
3157 crq.request_capability.capability =
3158 cpu_to_be16(PROMISC_REQUESTED);
3159 crq.request_capability.number = cpu_to_be64(1);
3160 atomic_inc(&adapter->running_cap_crqs);
3161 ibmvnic_send_crq(adapter, &crq);
3164 crq.request_capability.capability =
3165 cpu_to_be16(PROMISC_REQUESTED);
3166 crq.request_capability.number = cpu_to_be64(0);
3167 atomic_inc(&adapter->running_cap_crqs);
3168 ibmvnic_send_crq(adapter, &crq);
3172 static int pending_scrq(struct ibmvnic_adapter *adapter,
3173 struct ibmvnic_sub_crq_queue *scrq)
3175 union sub_crq *entry = &scrq->msgs[scrq->cur];
3177 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3183 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3184 struct ibmvnic_sub_crq_queue *scrq)
3186 union sub_crq *entry;
3187 unsigned long flags;
3189 spin_lock_irqsave(&scrq->lock, flags);
3190 entry = &scrq->msgs[scrq->cur];
3191 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3192 if (++scrq->cur == scrq->size)
3197 spin_unlock_irqrestore(&scrq->lock, flags);
3202 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3204 struct ibmvnic_crq_queue *queue = &adapter->crq;
3205 union ibmvnic_crq *crq;
3207 crq = &queue->msgs[queue->cur];
3208 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3209 if (++queue->cur == queue->size)
3218 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3222 dev_warn_ratelimited(dev,
3223 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3227 dev_warn_ratelimited(dev,
3228 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3232 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3237 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3238 union sub_crq *sub_crq)
3240 unsigned int ua = adapter->vdev->unit_address;
3241 struct device *dev = &adapter->vdev->dev;
3242 u64 *u64_crq = (u64 *)sub_crq;
3245 netdev_dbg(adapter->netdev,
3246 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3247 (unsigned long int)cpu_to_be64(remote_handle),
3248 (unsigned long int)cpu_to_be64(u64_crq[0]),
3249 (unsigned long int)cpu_to_be64(u64_crq[1]),
3250 (unsigned long int)cpu_to_be64(u64_crq[2]),
3251 (unsigned long int)cpu_to_be64(u64_crq[3]));
3253 /* Make sure the hypervisor sees the complete request */
3256 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3257 cpu_to_be64(remote_handle),
3258 cpu_to_be64(u64_crq[0]),
3259 cpu_to_be64(u64_crq[1]),
3260 cpu_to_be64(u64_crq[2]),
3261 cpu_to_be64(u64_crq[3]));
3264 print_subcrq_error(dev, rc, __func__);
3269 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3270 u64 remote_handle, u64 ioba, u64 num_entries)
3272 unsigned int ua = adapter->vdev->unit_address;
3273 struct device *dev = &adapter->vdev->dev;
3276 /* Make sure the hypervisor sees the complete request */
3278 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3279 cpu_to_be64(remote_handle),
3283 print_subcrq_error(dev, rc, __func__);
3288 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3289 union ibmvnic_crq *crq)
3291 unsigned int ua = adapter->vdev->unit_address;
3292 struct device *dev = &adapter->vdev->dev;
3293 u64 *u64_crq = (u64 *)crq;
3296 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3297 (unsigned long int)cpu_to_be64(u64_crq[0]),
3298 (unsigned long int)cpu_to_be64(u64_crq[1]));
3300 if (!adapter->crq.active &&
3301 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3302 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3306 /* Make sure the hypervisor sees the complete request */
3309 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3310 cpu_to_be64(u64_crq[0]),
3311 cpu_to_be64(u64_crq[1]));
3314 if (rc == H_CLOSED) {
3315 dev_warn(dev, "CRQ Queue closed\n");
3316 if (adapter->resetting)
3317 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3320 dev_warn(dev, "Send error (rc=%d)\n", rc);
3326 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3328 union ibmvnic_crq crq;
3330 memset(&crq, 0, sizeof(crq));
3331 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3332 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3333 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3335 return ibmvnic_send_crq(adapter, &crq);
3338 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3340 union ibmvnic_crq crq;
3342 memset(&crq, 0, sizeof(crq));
3343 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3344 crq.version_exchange.cmd = VERSION_EXCHANGE;
3345 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3347 return ibmvnic_send_crq(adapter, &crq);
3350 struct vnic_login_client_data {
3356 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3360 /* Calculate the amount of buffer space needed for the
3361 * vnic client data in the login buffer. There are four entries:
3362 * OS name, LPAR name, device name, and a null last entry.
3364 len = 4 * sizeof(struct vnic_login_client_data);
3365 len += 6; /* "Linux" plus NUL terminator */
3366 len += strlen(utsname()->nodename) + 1;
3367 len += strlen(adapter->netdev->name) + 1;
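/* As a worked example (hypothetical names): with nodename "lpar1" and
 * netdev name "eth0" this works out to
 * 4 * sizeof(struct vnic_login_client_data) + 6 ("Linux") + 6 ("lpar1")
 * + 5 ("eth0"), each string counted with its NUL terminator.
 */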
3372 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3373 struct vnic_login_client_data *vlcd)
3375 const char *os_name = "Linux";
3378 /* Type 1 - LPAR OS */
3380 len = strlen(os_name) + 1;
3381 vlcd->len = cpu_to_be16(len);
3382 strncpy(vlcd->name, os_name, len);
3383 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3385 /* Type 2 - LPAR name */
3387 len = strlen(utsname()->nodename) + 1;
3388 vlcd->len = cpu_to_be16(len);
3389 strncpy(vlcd->name, utsname()->nodename, len);
3390 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3392 /* Type 3 - device name */
3394 len = strlen(adapter->netdev->name) + 1;
3395 vlcd->len = cpu_to_be16(len);
3396 strncpy(vlcd->name, adapter->netdev->name, len);
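/* send_login - build and send the LOGIN CRQ.
 *
 * The login buffer laid out below contains the TX/RX sub-CRQ handle
 * lists followed by the client data from vnic_add_client_data(); the
 * response buffer is DMA-mapped up front so the server can write the
 * login response directly into it.
 */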
3399 static int send_login(struct ibmvnic_adapter *adapter)
3401 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3402 struct ibmvnic_login_buffer *login_buffer;
3403 struct device *dev = &adapter->vdev->dev;
3404 dma_addr_t rsp_buffer_token;
3405 dma_addr_t buffer_token;
3406 size_t rsp_buffer_size;
3407 union ibmvnic_crq crq;
3411 int client_data_len;
3412 struct vnic_login_client_data *vlcd;
3415 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3416 netdev_err(adapter->netdev,
3417 "RX or TX queues are not allocated, device login failed\n");
3421 release_login_rsp_buffer(adapter);
3422 client_data_len = vnic_client_data_len(adapter);
3425 sizeof(struct ibmvnic_login_buffer) +
3426 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3429 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3431 goto buf_alloc_failed;
3433 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3435 if (dma_mapping_error(dev, buffer_token)) {
3436 dev_err(dev, "Couldn't map login buffer\n");
3437 goto buf_map_failed;
3440 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3441 sizeof(u64) * adapter->req_tx_queues +
3442 sizeof(u64) * adapter->req_rx_queues +
3443 sizeof(u64) * adapter->req_rx_queues +
3444 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3446 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3447 if (!login_rsp_buffer)
3448 goto buf_rsp_alloc_failed;
3450 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3451 rsp_buffer_size, DMA_FROM_DEVICE);
3452 if (dma_mapping_error(dev, rsp_buffer_token)) {
3453 dev_err(dev, "Couldn't map login rsp buffer\n");
3454 goto buf_rsp_map_failed;
3457 adapter->login_buf = login_buffer;
3458 adapter->login_buf_token = buffer_token;
3459 adapter->login_buf_sz = buffer_size;
3460 adapter->login_rsp_buf = login_rsp_buffer;
3461 adapter->login_rsp_buf_token = rsp_buffer_token;
3462 adapter->login_rsp_buf_sz = rsp_buffer_size;
3464 login_buffer->len = cpu_to_be32(buffer_size);
3465 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3466 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3467 login_buffer->off_txcomp_subcrqs =
3468 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3469 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3470 login_buffer->off_rxcomp_subcrqs =
3471 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3472 sizeof(u64) * adapter->req_tx_queues);
3473 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3474 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3476 tx_list_p = (__be64 *)((char *)login_buffer +
3477 sizeof(struct ibmvnic_login_buffer));
3478 rx_list_p = (__be64 *)((char *)login_buffer +
3479 sizeof(struct ibmvnic_login_buffer) +
3480 sizeof(u64) * adapter->req_tx_queues);
3482 for (i = 0; i < adapter->req_tx_queues; i++) {
3483 if (adapter->tx_scrq[i]) {
3484 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3489 for (i = 0; i < adapter->req_rx_queues; i++) {
3490 if (adapter->rx_scrq[i]) {
3491 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3496 /* Insert vNIC login client data */
3497 vlcd = (struct vnic_login_client_data *)
3498 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3499 login_buffer->client_data_offset =
3500 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3501 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3503 vnic_add_client_data(adapter, vlcd);
3505 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3506 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3507 netdev_dbg(adapter->netdev, "%016lx\n",
3508 ((unsigned long int *)(adapter->login_buf))[i]);
3511 memset(&crq, 0, sizeof(crq));
3512 crq.login.first = IBMVNIC_CRQ_CMD;
3513 crq.login.cmd = LOGIN;
3514 crq.login.ioba = cpu_to_be32(buffer_token);
3515 crq.login.len = cpu_to_be32(buffer_size);
3516 ibmvnic_send_crq(adapter, &crq);
3521 kfree(login_rsp_buffer);
3522 buf_rsp_alloc_failed:
3523 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3525 kfree(login_buffer);
3530 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3533 union ibmvnic_crq crq;
3535 memset(&crq, 0, sizeof(crq));
3536 crq.request_map.first = IBMVNIC_CRQ_CMD;
3537 crq.request_map.cmd = REQUEST_MAP;
3538 crq.request_map.map_id = map_id;
3539 crq.request_map.ioba = cpu_to_be32(addr);
3540 crq.request_map.len = cpu_to_be32(len);
3541 return ibmvnic_send_crq(adapter, &crq);
3544 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3546 union ibmvnic_crq crq;
3548 memset(&crq, 0, sizeof(crq));
3549 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3550 crq.request_unmap.cmd = REQUEST_UNMAP;
3551 crq.request_unmap.map_id = map_id;
3552 return ibmvnic_send_crq(adapter, &crq);
3555 static void send_map_query(struct ibmvnic_adapter *adapter)
3557 union ibmvnic_crq crq;
3559 memset(&crq, 0, sizeof(crq));
3560 crq.query_map.first = IBMVNIC_CRQ_CMD;
3561 crq.query_map.cmd = QUERY_MAP;
3562 ibmvnic_send_crq(adapter, &crq);
3565 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3566 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3568 union ibmvnic_crq crq;
3570 atomic_set(&adapter->running_cap_crqs, 0);
3571 memset(&crq, 0, sizeof(crq));
3572 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3573 crq.query_capability.cmd = QUERY_CAPABILITY;
3575 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3576 atomic_inc(&adapter->running_cap_crqs);
3577 ibmvnic_send_crq(adapter, &crq);
3579 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3580 atomic_inc(&adapter->running_cap_crqs);
3581 ibmvnic_send_crq(adapter, &crq);
3583 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3584 atomic_inc(&adapter->running_cap_crqs);
3585 ibmvnic_send_crq(adapter, &crq);
3587 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3588 atomic_inc(&adapter->running_cap_crqs);
3589 ibmvnic_send_crq(adapter, &crq);
3591 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3592 atomic_inc(&adapter->running_cap_crqs);
3593 ibmvnic_send_crq(adapter, &crq);
3595 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3596 atomic_inc(&adapter->running_cap_crqs);
3597 ibmvnic_send_crq(adapter, &crq);
3599 crq.query_capability.capability =
3600 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3601 atomic_inc(&adapter->running_cap_crqs);
3602 ibmvnic_send_crq(adapter, &crq);
3604 crq.query_capability.capability =
3605 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3606 atomic_inc(&adapter->running_cap_crqs);
3607 ibmvnic_send_crq(adapter, &crq);
3609 crq.query_capability.capability =
3610 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3611 atomic_inc(&adapter->running_cap_crqs);
3612 ibmvnic_send_crq(adapter, &crq);
3614 crq.query_capability.capability =
3615 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3616 atomic_inc(&adapter->running_cap_crqs);
3617 ibmvnic_send_crq(adapter, &crq);
3619 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3620 atomic_inc(&adapter->running_cap_crqs);
3621 ibmvnic_send_crq(adapter, &crq);
3623 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3624 atomic_inc(&adapter->running_cap_crqs);
3625 ibmvnic_send_crq(adapter, &crq);
3627 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3628 atomic_inc(&adapter->running_cap_crqs);
3629 ibmvnic_send_crq(adapter, &crq);
3631 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3632 atomic_inc(&adapter->running_cap_crqs);
3633 ibmvnic_send_crq(adapter, &crq);
3635 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3636 atomic_inc(&adapter->running_cap_crqs);
3637 ibmvnic_send_crq(adapter, &crq);
3639 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3640 atomic_inc(&adapter->running_cap_crqs);
3641 ibmvnic_send_crq(adapter, &crq);
3643 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3644 atomic_inc(&adapter->running_cap_crqs);
3645 ibmvnic_send_crq(adapter, &crq);
3647 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3648 atomic_inc(&adapter->running_cap_crqs);
3649 ibmvnic_send_crq(adapter, &crq);
3651 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3652 atomic_inc(&adapter->running_cap_crqs);
3653 ibmvnic_send_crq(adapter, &crq);
3655 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3656 atomic_inc(&adapter->running_cap_crqs);
3657 ibmvnic_send_crq(adapter, &crq);
3659 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3660 atomic_inc(&adapter->running_cap_crqs);
3661 ibmvnic_send_crq(adapter, &crq);
3663 crq.query_capability.capability =
3664 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3665 atomic_inc(&adapter->running_cap_crqs);
3666 ibmvnic_send_crq(adapter, &crq);
3668 crq.query_capability.capability =
3669 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3670 atomic_inc(&adapter->running_cap_crqs);
3671 ibmvnic_send_crq(adapter, &crq);
3673 crq.query_capability.capability =
3674 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3675 atomic_inc(&adapter->running_cap_crqs);
3676 ibmvnic_send_crq(adapter, &crq);
3678 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3679 atomic_inc(&adapter->running_cap_crqs);
3680 ibmvnic_send_crq(adapter, &crq);
3683 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3684 struct ibmvnic_adapter *adapter)
3686 struct device *dev = &adapter->vdev->dev;
3688 if (crq->get_vpd_size_rsp.rc.code) {
3689 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3690 crq->get_vpd_size_rsp.rc.code);
3691 complete(&adapter->fw_done);
3695 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3696 complete(&adapter->fw_done);
3699 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3700 struct ibmvnic_adapter *adapter)
3702 struct device *dev = &adapter->vdev->dev;
3703 unsigned char *substr = NULL;
3704 u8 fw_level_len = 0;
3706 memset(adapter->fw_version, 0, 32);
3708 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3711 if (crq->get_vpd_rsp.rc.code) {
3712 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3713 crq->get_vpd_rsp.rc.code);
3717 /* get the position of the firmware version info
3718 * located after the ASCII 'RM' substring in the buffer
3720 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3722 dev_info(dev, "No FW level provided in the VPD buffer by the VIOS server\n");
3726 /* get length of firmware level ASCII substring */
3727 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3728 fw_level_len = *(substr + 2);
3730 dev_info(dev, "FW level length byte lies beyond the VPD buffer\n");
3734 /* copy firmware version string from vpd into adapter */
3735 if ((substr + 3 + fw_level_len) <
3736 (adapter->vpd->buff + adapter->vpd->len)) {
3737 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3739 dev_info(dev, "FW level substring extends past the end of the VPD buffer\n");
3743 if (adapter->fw_version[0] == '\0')
3744 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3745 complete(&adapter->fw_done);
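/* Parse the server's IP offload query response, program the control
 * buffer that tells the server which offloads to enable, and translate
 * the result into netdev hw_features (checksum offload, TSO/TSO6).
 * Large receive stays disabled (see below). Outside of initial probing,
 * features are re-derived so anything no longer supported is dropped
 * and previously wanted features are restored.
 */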
3748 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3750 struct device *dev = &adapter->vdev->dev;
3751 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3752 netdev_features_t old_hw_features = 0;
3753 union ibmvnic_crq crq;
3756 dma_unmap_single(dev, adapter->ip_offload_tok,
3757 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3759 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3760 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3761 netdev_dbg(adapter->netdev, "%016lx\n",
3762 ((unsigned long int *)(buf))[i]);
3764 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3765 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3766 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3767 buf->tcp_ipv4_chksum);
3768 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3769 buf->tcp_ipv6_chksum);
3770 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3771 buf->udp_ipv4_chksum);
3772 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3773 buf->udp_ipv6_chksum);
3774 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3775 buf->large_tx_ipv4);
3776 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3777 buf->large_tx_ipv6);
3778 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3779 buf->large_rx_ipv4);
3780 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3781 buf->large_rx_ipv6);
3782 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3783 buf->max_ipv4_header_size);
3784 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3785 buf->max_ipv6_header_size);
3786 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3787 buf->max_tcp_header_size);
3788 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3789 buf->max_udp_header_size);
3790 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3791 buf->max_large_tx_size);
3792 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3793 buf->max_large_rx_size);
3794 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3795 buf->ipv6_extension_header);
3796 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3797 buf->tcp_pseudosum_req);
3798 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3799 buf->num_ipv6_ext_headers);
3800 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3801 buf->off_ipv6_ext_headers);
3803 adapter->ip_offload_ctrl_tok =
3804 dma_map_single(dev, &adapter->ip_offload_ctrl,
3805 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3807 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3808 dev_err(dev, "Couldn't map ip offload control buffer\n");
3812 adapter->ip_offload_ctrl.len =
3813 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3814 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3815 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3816 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3817 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3818 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3819 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3820 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3821 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3822 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3824 /* large_rx disabled for now, additional features needed */
3825 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3826 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3828 if (adapter->state != VNIC_PROBING) {
3829 old_hw_features = adapter->netdev->hw_features;
3830 adapter->netdev->hw_features = 0;
3833 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3835 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3836 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3838 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3839 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3841 if ((adapter->netdev->features &
3842 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3843 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3845 if (buf->large_tx_ipv4)
3846 adapter->netdev->hw_features |= NETIF_F_TSO;
3847 if (buf->large_tx_ipv6)
3848 adapter->netdev->hw_features |= NETIF_F_TSO6;
3850 if (adapter->state == VNIC_PROBING) {
3851 adapter->netdev->features |= adapter->netdev->hw_features;
3852 } else if (old_hw_features != adapter->netdev->hw_features) {
3853 netdev_features_t tmp = 0;
3855 /* disable features no longer supported */
3856 adapter->netdev->features &= adapter->netdev->hw_features;
3857 /* turn on features now supported if previously enabled */
3858 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3859 adapter->netdev->hw_features;
3860 adapter->netdev->features |=
3861 tmp & adapter->netdev->wanted_features;
3864 memset(&crq, 0, sizeof(crq));
3865 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3866 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3867 crq.control_ip_offload.len =
3868 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3869 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3870 ibmvnic_send_crq(adapter, &crq);
3873 static const char *ibmvnic_fw_err_cause(u16 cause)
3876 case ADAPTER_PROBLEM:
3877 return "adapter problem";
3879 return "bus problem";
3881 return "firmware problem";
3883 return "device driver problem";
3885 return "EEH recovery";
3887 return "firmware updated";
3889 return "low memory";
3895 static void handle_error_indication(union ibmvnic_crq *crq,
3896 struct ibmvnic_adapter *adapter)
3898 struct device *dev = &adapter->vdev->dev;
3901 cause = be16_to_cpu(crq->error_indication.error_cause);
3903 dev_warn_ratelimited(dev,
3904 "Firmware reports %serror, cause: %s. Starting recovery...\n",
3905 crq->error_indication.flags
3906 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3907 ibmvnic_fw_err_cause(cause));
3909 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3910 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3912 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3915 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3916 struct ibmvnic_adapter *adapter)
3918 struct net_device *netdev = adapter->netdev;
3919 struct device *dev = &adapter->vdev->dev;
3922 rc = crq->change_mac_addr_rsp.rc.code;
3924 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3927 ether_addr_copy(netdev->dev_addr,
3928 &crq->change_mac_addr_rsp.mac_addr[0]);
3930 complete(&adapter->fw_done);
3934 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3935 struct ibmvnic_adapter *adapter)
3937 struct device *dev = &adapter->vdev->dev;
3941 atomic_dec(&adapter->running_cap_crqs);
3942 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3944 req_value = &adapter->req_tx_queues;
3948 req_value = &adapter->req_rx_queues;
3951 case REQ_RX_ADD_QUEUES:
3952 req_value = &adapter->req_rx_add_queues;
3955 case REQ_TX_ENTRIES_PER_SUBCRQ:
3956 req_value = &adapter->req_tx_entries_per_subcrq;
3957 name = "tx_entries_per_subcrq";
3959 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3960 req_value = &adapter->req_rx_add_entries_per_subcrq;
3961 name = "rx_add_entries_per_subcrq";
3964 req_value = &adapter->req_mtu;
3967 case PROMISC_REQUESTED:
3968 req_value = &adapter->promisc;
3972 dev_err(dev, "Got invalid cap request rsp %d\n",
3973 crq->request_capability.capability);
3977 switch (crq->request_capability_rsp.rc.code) {
3980 case PARTIALSUCCESS:
3981 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3983 (long int)be64_to_cpu(crq->request_capability_rsp.
3986 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3988 pr_err("mtu of %llu is not supported. Reverting.\n",
3990 *req_value = adapter->fallback.mtu;
3993 be64_to_cpu(crq->request_capability_rsp.number);
3996 ibmvnic_send_req_caps(adapter, 1);
3999 dev_err(dev, "Error %d in request cap rsp\n",
4000 crq->request_capability_rsp.rc.code);
4004 /* Done receiving requested capabilities, query IP offload support */
4005 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4006 union ibmvnic_crq newcrq;
4007 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4008 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4009 &adapter->ip_offload_buf;
4011 adapter->wait_capability = false;
4012 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4016 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4017 if (!firmware_has_feature(FW_FEATURE_CMO))
4018 dev_err(dev, "Couldn't map offload buffer\n");
4022 memset(&newcrq, 0, sizeof(newcrq));
4023 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4024 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4025 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4026 newcrq.query_ip_offload.ioba =
4027 cpu_to_be32(adapter->ip_offload_tok);
4029 ibmvnic_send_crq(adapter, &newcrq);
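/* Handle the LOGIN response. A non-zero return code (e.g. when the
 * server cannot satisfy the requested queue counts) is propagated via
 * init_done_rc so the caller can retry the login with a smaller
 * request; a response inconsistent with the login buffer that was
 * sent is treated as fatal.
 */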
4033 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4034 struct ibmvnic_adapter *adapter)
4036 struct device *dev = &adapter->vdev->dev;
4037 struct net_device *netdev = adapter->netdev;
4038 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4039 struct ibmvnic_login_buffer *login = adapter->login_buf;
4042 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4044 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4045 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4047 /* If the number of queues requested can't be allocated by the
4048 * server, the login response will return with code 1. We will need
4049 * to resend the login buffer with fewer queues requested.
4051 if (login_rsp_crq->generic.rc.code) {
4052 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4053 complete(&adapter->init_done);
4057 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4059 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4060 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4061 netdev_dbg(adapter->netdev, "%016lx\n",
4062 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4066 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4067 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4068 adapter->req_rx_add_queues !=
4069 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4070 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4071 ibmvnic_remove(adapter->vdev);
4074 release_login_buffer(adapter);
4075 complete(&adapter->init_done);
4080 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4081 struct ibmvnic_adapter *adapter)
4083 struct device *dev = &adapter->vdev->dev;
4086 rc = crq->request_unmap_rsp.rc.code;
4088 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4091 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4092 struct ibmvnic_adapter *adapter)
4094 struct net_device *netdev = adapter->netdev;
4095 struct device *dev = &adapter->vdev->dev;
4098 rc = crq->query_map_rsp.rc.code;
4100 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4103 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4104 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4105 crq->query_map_rsp.free_pages);
4108 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4109 struct ibmvnic_adapter *adapter)
4111 struct net_device *netdev = adapter->netdev;
4112 struct device *dev = &adapter->vdev->dev;
4115 atomic_dec(&adapter->running_cap_crqs);
4116 netdev_dbg(netdev, "Outstanding queries: %d\n",
4117 atomic_read(&adapter->running_cap_crqs));
4118 rc = crq->query_capability.rc.code;
4120 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4124 switch (be16_to_cpu(crq->query_capability.capability)) {
4126 adapter->min_tx_queues =
4127 be64_to_cpu(crq->query_capability.number);
4128 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4129 adapter->min_tx_queues);
4132 adapter->min_rx_queues =
4133 be64_to_cpu(crq->query_capability.number);
4134 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4135 adapter->min_rx_queues);
4137 case MIN_RX_ADD_QUEUES:
4138 adapter->min_rx_add_queues =
4139 be64_to_cpu(crq->query_capability.number);
4140 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4141 adapter->min_rx_add_queues);
4144 adapter->max_tx_queues =
4145 be64_to_cpu(crq->query_capability.number);
4146 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4147 adapter->max_tx_queues);
4150 adapter->max_rx_queues =
4151 be64_to_cpu(crq->query_capability.number);
4152 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4153 adapter->max_rx_queues);
4155 case MAX_RX_ADD_QUEUES:
4156 adapter->max_rx_add_queues =
4157 be64_to_cpu(crq->query_capability.number);
4158 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4159 adapter->max_rx_add_queues);
4161 case MIN_TX_ENTRIES_PER_SUBCRQ:
4162 adapter->min_tx_entries_per_subcrq =
4163 be64_to_cpu(crq->query_capability.number);
4164 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4165 adapter->min_tx_entries_per_subcrq);
4167 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4168 adapter->min_rx_add_entries_per_subcrq =
4169 be64_to_cpu(crq->query_capability.number);
4170 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4171 adapter->min_rx_add_entries_per_subcrq);
4173 case MAX_TX_ENTRIES_PER_SUBCRQ:
4174 adapter->max_tx_entries_per_subcrq =
4175 be64_to_cpu(crq->query_capability.number);
4176 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4177 adapter->max_tx_entries_per_subcrq);
4179 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4180 adapter->max_rx_add_entries_per_subcrq =
4181 be64_to_cpu(crq->query_capability.number);
4182 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4183 adapter->max_rx_add_entries_per_subcrq);
4185 case TCP_IP_OFFLOAD:
4186 adapter->tcp_ip_offload =
4187 be64_to_cpu(crq->query_capability.number);
4188 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4189 adapter->tcp_ip_offload);
4191 case PROMISC_SUPPORTED:
4192 adapter->promisc_supported =
4193 be64_to_cpu(crq->query_capability.number);
4194 netdev_dbg(netdev, "promisc_supported = %lld\n",
4195 adapter->promisc_supported);
4198 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4199 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4200 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4203 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4204 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4205 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4207 case MAX_MULTICAST_FILTERS:
4208 adapter->max_multicast_filters =
4209 be64_to_cpu(crq->query_capability.number);
4210 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4211 adapter->max_multicast_filters);
4213 case VLAN_HEADER_INSERTION:
4214 adapter->vlan_header_insertion =
4215 be64_to_cpu(crq->query_capability.number);
4216 if (adapter->vlan_header_insertion)
4217 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4218 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4219 adapter->vlan_header_insertion);
4221 case RX_VLAN_HEADER_INSERTION:
4222 adapter->rx_vlan_header_insertion =
4223 be64_to_cpu(crq->query_capability.number);
4224 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4225 adapter->rx_vlan_header_insertion);
4227 case MAX_TX_SG_ENTRIES:
4228 adapter->max_tx_sg_entries =
4229 be64_to_cpu(crq->query_capability.number);
4230 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4231 adapter->max_tx_sg_entries);
4233 case RX_SG_SUPPORTED:
4234 adapter->rx_sg_supported =
4235 be64_to_cpu(crq->query_capability.number);
4236 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4237 adapter->rx_sg_supported);
4239 case OPT_TX_COMP_SUB_QUEUES:
4240 adapter->opt_tx_comp_sub_queues =
4241 be64_to_cpu(crq->query_capability.number);
4242 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4243 adapter->opt_tx_comp_sub_queues);
4245 case OPT_RX_COMP_QUEUES:
4246 adapter->opt_rx_comp_queues =
4247 be64_to_cpu(crq->query_capability.number);
4248 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4249 adapter->opt_rx_comp_queues);
4251 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4252 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4253 be64_to_cpu(crq->query_capability.number);
4254 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4255 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4257 case OPT_TX_ENTRIES_PER_SUBCRQ:
4258 adapter->opt_tx_entries_per_subcrq =
4259 be64_to_cpu(crq->query_capability.number);
4260 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4261 adapter->opt_tx_entries_per_subcrq);
4263 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4264 adapter->opt_rxba_entries_per_subcrq =
4265 be64_to_cpu(crq->query_capability.number);
4266 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4267 adapter->opt_rxba_entries_per_subcrq);
4269 case TX_RX_DESC_REQ:
4270 adapter->tx_rx_desc_req = crq->query_capability.number;
4271 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4272 adapter->tx_rx_desc_req);
4276 netdev_err(netdev, "Got invalid cap rsp %d\n",
4277 crq->query_capability.capability);
4281 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4282 adapter->wait_capability = false;
4283 ibmvnic_send_req_caps(adapter, 0);
4287 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4289 union ibmvnic_crq crq;
4292 memset(&crq, 0, sizeof(crq));
4293 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4294 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4295 init_completion(&adapter->fw_done);
4296 rc = ibmvnic_send_crq(adapter, &crq);
4299 wait_for_completion(&adapter->fw_done);
4300 return adapter->fw_done_rc ? -EIO : 0;
4303 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4304 struct ibmvnic_adapter *adapter)
4306 struct net_device *netdev = adapter->netdev;
4309 rc = crq->query_phys_parms_rsp.rc.code;
4311 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4314 switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
4315 case IBMVNIC_10MBPS:
4316 adapter->speed = SPEED_10;
4318 case IBMVNIC_100MBPS:
4319 adapter->speed = SPEED_100;
4322 adapter->speed = SPEED_1000;
4325 adapter->speed = SPEED_10000;
4327 case IBMVNIC_25GBPS:
4328 adapter->speed = SPEED_25000;
4330 case IBMVNIC_40GBPS:
4331 adapter->speed = SPEED_40000;
4333 case IBMVNIC_50GBPS:
4334 adapter->speed = SPEED_50000;
4336 case IBMVNIC_100GBPS:
4337 adapter->speed = SPEED_100000;
4340 netdev_warn(netdev, "Unknown speed 0x%08x\n",
4341 cpu_to_be32(crq->query_phys_parms_rsp.speed));
4342 adapter->speed = SPEED_UNKNOWN;
4344 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4345 adapter->duplex = DUPLEX_FULL;
4346 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4347 adapter->duplex = DUPLEX_HALF;
4349 adapter->duplex = DUPLEX_UNKNOWN;
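
/* Top-level CRQ dispatcher. Transport events (migration, failover, loss
 * of connection) and the initialization handshake are handled inline;
 * everything else is a command response fanned out to the handle_*_rsp()
 * helpers.
 */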
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
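
/* CRQ interrupt handler. The queue is drained in tasklet (softirq)
 * context, so all we do here is kick the tasklet.
 */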
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
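
/* Drain the CRQ under queue->lock, handing each valid message to
 * ibmvnic_handle_crq() and clearing its "first" byte so the slot reads
 * as free on the next pass. The tasklet keeps polling while capability
 * responses are still expected.
 */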
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
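
/* Re-enable a CRQ the hypervisor has disabled, retrying the
 * H_ENABLE_CRQ hcall for as long as firmware reports it is busy.
 */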
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
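
/* Close and re-register the CRQ with the hypervisor without releasing
 * its memory: H_FREE_CRQ, zero the page, then H_REG_CRQ again. Used,
 * for example, from init_crq_queue() when a kexec left the resource
 * busy.
 */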
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
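
/* Fully release the CRQ: free the irq, kill the tasklet, close the
 * queue with H_FREE_CRQ, then unmap and free the backing page.
 */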
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
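
/* Allocate, DMA-map and register the one-page CRQ with the hypervisor,
 * then wire up the tasklet and the CRQ interrupt. H_CLOSED just means
 * the partner is not ready yet and is not treated as fatal here.
 */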
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
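
/* Re-initialize the CRQ protocol during a reset: re-drive the init
 * handshake, then either rebuild the sub-CRQs (when the queue counts
 * negotiated with the server have changed) or simply reset them.
 */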
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
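
/* First-time counterpart of ibmvnic_reset_init(): run the CRQ init
 * handshake and allocate the sub-CRQs and their interrupts from scratch.
 */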
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;
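
/* Probe: fetch the MAC address via vio_get_attribute(), allocate the
 * netdev, negotiate with the VNIC server (retrying while it reports
 * EAGAIN), then register with the networking core.
 */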
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	adapter->resetting = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
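
/* Remove: tear everything down in roughly the reverse order of probe.
 * The netdev is unregistered with unregister_netdevice(), so the RTNL
 * lock is held across the release of the driver's resources.
 */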
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
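
/* sysfs hook for client-initiated failover. Writing "1" to the
 * attribute (e.g. "echo 1 > /sys/devices/vio/<unit>/failover"; the
 * exact sysfs path depends on the platform) retrieves a session token
 * from firmware and signals a session error so the backing device
 * fails over.
 */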
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
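
/* Estimate the IO entitlement (DMA window space) this adapter wants:
 * one page for the CRQ, space for the statistics buffer, four pages per
 * sub-CRQ, plus every long-term-mapped rx buffer pool. Before probe has
 * run we can only return the driver-wide default.
 */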
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
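
/* PM resume hook: if the adapter was open, schedule the tasklet to pick
 * up any CRQ messages that may have arrived while suspended.
 */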
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
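
/* Match any vio device of type "network" with compatible "IBM,vnic" */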
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);