/* IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is
 * ready for transmission or that a buffer has been added to receive a
 * packet. Subsequently, sCRQs are used by the server to notify the
 * driver that a packet transmission has been completed or that a packet
 * has been received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, contiguous DMA mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 */
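/* Editor's sketch (not additional driver code): every command in this file
 * reaches the VNIC server as a CRQ descriptor built with the same pattern,
 * i.e. zero a union ibmvnic_crq, set the IBMVNIC_CRQ_CMD marker and the
 * command code, fill the payload, then hand it to ibmvnic_send_crq().
 * Mirroring set_link_state() later in this file:
 *
 *	union ibmvnic_crq crq;
 *
 *	memset(&crq, 0, sizeof(crq));
 *	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 *	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 *	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
 *	rc = ibmvnic_send_crq(adapter, &crq);
 */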
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
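/* Editor's note on the two macros above: IBMVNIC_STAT_OFF() records, at
 * compile time, the byte offset of one firmware statistic inside struct
 * ibmvnic_adapter, and IBMVNIC_GET_STAT() reads it back as a u64. A
 * hedged usage sketch (this is how an ethtool stats handler would
 * consume the table below):
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter,
 *					   ibmvnic_stats[i].offset);
 */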
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq_num)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq_num = retbuf[1];

	return rc;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}
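/* Editor's usage sketch for the helpers above and below; the rx/tx pool
 * init and release paths later in this file follow exactly this pairing
 * (the names here are illustrative, not new driver state):
 *
 *	struct ibmvnic_long_term_buff ltb = {};
 *
 *	rc = alloc_long_term_buff(adapter, &ltb, size);
 *	if (rc)
 *		return rc;
 *	... reuse ltb.buff / ltb.addr for the life of the adapter ...
 *	free_long_term_buff(adapter, &ltb);
 */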
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * discarded.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
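/* Editor's worked example of the 24-bit length encoding above: with a
 * 4 KB buffer on a little-endian host,
 *
 *	pool->buff_size    = 0x00001000
 *	buff_size << 8     = 0x00100000
 *	cpu_to_be32(0x00100000) lands in memory as bytes 00 10 00 00
 *
 * so the three bytes the 24-bit sCRQ length field carries are 00 10 00
 * (0x001000 = 4096); the byte the field cannot hold is the zero shifted
 * in at the bottom, not part of the size.
 */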
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 *size_array;
	int rx_scrqs;
	int i, j, rc;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					     rx_pool->size *
					     rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
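/* Editor's geometry sketch for one rx pool created above: with
 * req_rx_add_entries_per_subcrq = N and a firmware buffer size of B, the
 * pool owns a single long term buffer of N * B bytes. Buffer i lives at
 * offset i * B, free_map cycles indices between replenish (next_free) and
 * completion (next_alloc), and rx_buff[i] records the skb and DMA address
 * handed to firmware through the rx_add sub-CRQ.
 */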
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int rc;

	do {
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (adapter->init_done_rc == PARTIALSUCCESS);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_error_buffers(adapter);
	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
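/* Editor's note on hdr_field, as consumed above and in ibmvnic_xmit():
 * it is a small bit mask negotiated with firmware; bit 6 requests the L2
 * header, bit 5 the L3 header and bit 4 the L4 header, while bit 7
 * (tested as (*hdrs >> 7) & 1 in ibmvnic_xmit()) gates whether header
 * descriptors are sent at all. For a TCP/IPv4 frame with all three bits
 * set, build_hdr_data() fills hdr_len = { 14, 20, tcp_hdrlen(skb) },
 * i.e. ethhdr + iphdr + TCP header.
 */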
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}
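/* Resulting layout of txbuff->indir_arr after the call above (editor's
 * sketch): slot 0 holds the main IBMVNIC_TX_DESC, filled in by
 * ibmvnic_xmit() below; slot 1 holds the IBMVNIC_HDR_DESC with the first
 * 24 bytes of header data plus the per-layer lengths; any remaining
 * header bytes follow in IBMVNIC_HDR_EXT_DESC slots of up to 29 bytes
 * each. num_entries is bumped accordingly so send_subcrq_indirect() can
 * hand the whole array to firmware in a single hcall.
 */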
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);
	return 0;
}
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = NETDEV_TX_OK;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;
	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
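/* Editor's note on the TX backpressure above: tx_scrq->used counts
 * descriptors outstanding in firmware; once it reaches
 * req_tx_entries_per_subcrq the subqueue is stopped, and the TX
 * completion path (outside this section) decrements the counter and
 * re-wakes the queue as entries drain.
 */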
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return adapter->fw_done_rc ? -EIO : 0;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	rc = __ibmvnic_set_mac(netdev, addr);

	return rc;
}
/* do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	ibmvnic_cleanup(netdev);

	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (!rc)
				rc = vio_enable_interrupts(adapter->vdev);
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);
			return rc;
		}

		rc = ibmvnic_reset_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
			adapter->map_id = 1;
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			init_rx_pools(netdev);
			init_tx_pools(netdev);

			release_napi(adapter);
			init_napi(adapter);
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		netdev_notify_peers(netdev);

	netif_carrier_on(netdev);

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			mutex_unlock(&adapter->rwi_lock);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);

	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
	return ret;
}
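/* Editor's note: ibmvnic_reset() above deduplicates by reason, so a burst
 * of identical events (e.g. repeated failovers) queues a single rwi entry;
 * __ibmvnic_reset() then drains the rwi list one do_reset() call at a time
 * under reset_lock.
 */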
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(adapter->resetting &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
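/* Editor's note on the tail of ibmvnic_poll() above: the interrupt is
 * re-enabled before pending_scrq() is checked one last time, so a frame
 * that arrives in the window between the final queue scan and
 * napi_complete_done() is not lost; if one is pending, the irq is
 * disabled again and napi_reschedule() queues another poll pass.
 */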
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	init_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	if (rc)
		return rc;
	wait_for_completion(&adapter->reset_done);

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		init_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc)
			return ret;
		wait_for_completion(&adapter->reset_done);
	}
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}
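/* Editor's example for the MTU handling above: the VNIC firmware MTU
 * (req_mtu/desired.mtu) counts the Ethernet header, while the netdev MTU
 * does not, hence the ETH_HLEN adjustment: a request for new_mtu = 1500
 * becomes desired.mtu = 1514 before the CHANGE_PARAM reset renegotiates
 * it with firmware.
 */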
static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot handle packets
	 * with an MSS less than 224 or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
2277 static void ibmvnic_get_ringparam(struct net_device *netdev,
2278 struct ethtool_ringparam *ring)
2280 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2282 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2283 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2284 ring->rx_mini_max_pending = 0;
2285 ring->rx_jumbo_max_pending = 0;
2286 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2287 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2288 ring->rx_mini_pending = 0;
2289 ring->rx_jumbo_pending = 0;
2292 static int ibmvnic_set_ringparam(struct net_device *netdev,
2293 struct ethtool_ringparam *ring)
2295 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2297 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2298 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2299 netdev_err(netdev, "Invalid request.\n");
2300 netdev_err(netdev, "Max tx buffers = %llu\n",
2301 adapter->max_rx_add_entries_per_subcrq);
2302 netdev_err(netdev, "Max rx buffers = %llu\n",
2303 adapter->max_tx_entries_per_subcrq);
2307 adapter->desired.rx_entries = ring->rx_pending;
2308 adapter->desired.tx_entries = ring->tx_pending;
2310 return wait_for_reset(adapter);
2313 static void ibmvnic_get_channels(struct net_device *netdev,
2314 struct ethtool_channels *channels)
2316 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2318 channels->max_rx = adapter->max_rx_queues;
2319 channels->max_tx = adapter->max_tx_queues;
2320 channels->max_other = 0;
2321 channels->max_combined = 0;
2322 channels->rx_count = adapter->req_rx_queues;
2323 channels->tx_count = adapter->req_tx_queues;
2324 channels->other_count = 0;
2325 channels->combined_count = 0;
2328 static int ibmvnic_set_channels(struct net_device *netdev,
2329 struct ethtool_channels *channels)
2331 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2333 adapter->desired.rx_queues = channels->rx_count;
2334 adapter->desired.tx_queues = channels->tx_count;
2336 return wait_for_reset(adapter);
2339 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2341 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2344 if (stringset != ETH_SS_STATS)
2347 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2348 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2350 for (i = 0; i < adapter->req_tx_queues; i++) {
2351 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2352 data += ETH_GSTRING_LEN;
2354 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2355 data += ETH_GSTRING_LEN;
2357 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2358 data += ETH_GSTRING_LEN;
2361 for (i = 0; i < adapter->req_rx_queues; i++) {
2362 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2363 data += ETH_GSTRING_LEN;
2365 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2366 data += ETH_GSTRING_LEN;
2368 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2369 data += ETH_GSTRING_LEN;
2373 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2375 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2379 return ARRAY_SIZE(ibmvnic_stats) +
2380 adapter->req_tx_queues * NUM_TX_STATS +
2381 adapter->req_rx_queues * NUM_RX_STATS;
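/* This count must stay in lockstep with ibmvnic_get_strings() above and the
 * fill order in ibmvnic_get_ethtool_stats() below: NUM_TX_STATS and
 * NUM_RX_STATS correspond to the three per-queue strings emitted for each
 * direction.
 */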
2387 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2388 struct ethtool_stats *stats, u64 *data)
2390 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2391 union ibmvnic_crq crq;
2395 memset(&crq, 0, sizeof(crq));
2396 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2397 crq.request_statistics.cmd = REQUEST_STATISTICS;
2398 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2399 crq.request_statistics.len =
2400 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2402 /* Wait for data to be written */
2403 init_completion(&adapter->stats_done);
2404 rc = ibmvnic_send_crq(adapter, &crq);
2407 wait_for_completion(&adapter->stats_done);
2409 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2410 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2411 ibmvnic_stats[i].offset));
2413 for (j = 0; j < adapter->req_tx_queues; j++) {
2414 data[i] = adapter->tx_stats_buffers[j].packets;
2415 i++;
2416 data[i] = adapter->tx_stats_buffers[j].bytes;
2417 i++;
2418 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2419 i++;
2422 for (j = 0; j < adapter->req_rx_queues; j++) {
2423 data[i] = adapter->rx_stats_buffers[j].packets;
2424 i++;
2425 data[i] = adapter->rx_stats_buffers[j].bytes;
2426 i++;
2427 data[i] = adapter->rx_stats_buffers[j].interrupts;
2428 i++;
2432 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2433 .get_drvinfo = ibmvnic_get_drvinfo,
2434 .get_msglevel = ibmvnic_get_msglevel,
2435 .set_msglevel = ibmvnic_set_msglevel,
2436 .get_link = ibmvnic_get_link,
2437 .get_ringparam = ibmvnic_get_ringparam,
2438 .set_ringparam = ibmvnic_set_ringparam,
2439 .get_channels = ibmvnic_get_channels,
2440 .set_channels = ibmvnic_set_channels,
2441 .get_strings = ibmvnic_get_strings,
2442 .get_sset_count = ibmvnic_get_sset_count,
2443 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2444 .get_link_ksettings = ibmvnic_get_link_ksettings,
2447 /* Routines for managing CRQs/sCRQs */
2449 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2450 struct ibmvnic_sub_crq_queue *scrq)
2455 free_irq(scrq->irq, scrq);
2456 irq_dispose_mapping(scrq->irq);
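/* The 4-page message area is long-term mapped, so resetting the queue just
 * zeroes it and re-registers the existing DMA token with the hypervisor;
 * nothing is reallocated.
 */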
2460 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2461 atomic_set(&scrq->used, 0);
2464 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2465 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2469 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2473 for (i = 0; i < adapter->req_tx_queues; i++) {
2474 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2475 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2480 for (i = 0; i < adapter->req_rx_queues; i++) {
2481 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2482 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2490 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2491 struct ibmvnic_sub_crq_queue *scrq,
2494 struct device *dev = &adapter->vdev->dev;
2497 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2500 /* Close the sub-crqs */
2502 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2503 adapter->vdev->unit_address,
2504 scrq->crq_num);
2505 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2508 netdev_err(adapter->netdev,
2509 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2514 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2515 DMA_BIDIRECTIONAL);
2516 free_pages((unsigned long)scrq->msgs, 2);
2520 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2523 struct device *dev = &adapter->vdev->dev;
2524 struct ibmvnic_sub_crq_queue *scrq;
2527 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2531 scrq->msgs =
2532 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2533 if (!scrq->msgs) {
2534 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2535 goto zero_page_failed;
2538 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2539 DMA_BIDIRECTIONAL);
2540 if (dma_mapping_error(dev, scrq->msg_token)) {
2541 dev_warn(dev, "Couldn't map crq queue messages page\n");
2545 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2546 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2548 if (rc == H_RESOURCE)
2549 rc = ibmvnic_reset_crq(adapter);
2551 if (rc == H_CLOSED) {
2552 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2554 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2558 scrq->adapter = adapter;
2559 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2560 spin_lock_init(&scrq->lock);
2562 netdev_dbg(adapter->netdev,
2563 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2564 scrq->crq_num, scrq->hw_irq, scrq->irq);
2569 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2570 DMA_BIDIRECTIONAL);
2572 free_pages((unsigned long)scrq->msgs, 2);
2579 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2583 if (adapter->tx_scrq) {
2584 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2585 if (!adapter->tx_scrq[i])
2588 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2590 if (adapter->tx_scrq[i]->irq) {
2591 free_irq(adapter->tx_scrq[i]->irq,
2592 adapter->tx_scrq[i]);
2593 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2594 adapter->tx_scrq[i]->irq = 0;
2597 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2598 do_h_free);
2601 kfree(adapter->tx_scrq);
2602 adapter->tx_scrq = NULL;
2603 adapter->num_active_tx_scrqs = 0;
2606 if (adapter->rx_scrq) {
2607 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2608 if (!adapter->rx_scrq[i])
2611 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2613 if (adapter->rx_scrq[i]->irq) {
2614 free_irq(adapter->rx_scrq[i]->irq,
2615 adapter->rx_scrq[i]);
2616 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2617 adapter->rx_scrq[i]->irq = 0;
2620 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2621 do_h_free);
2624 kfree(adapter->rx_scrq);
2625 adapter->rx_scrq = NULL;
2626 adapter->num_active_rx_scrqs = 0;
2630 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2631 struct ibmvnic_sub_crq_queue *scrq)
2633 struct device *dev = &adapter->vdev->dev;
2636 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2637 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2639 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2640 scrq->hw_irq, rc);
2644 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2645 struct ibmvnic_sub_crq_queue *scrq)
2647 struct device *dev = &adapter->vdev->dev;
2651 if (scrq->hw_irq > 0x100000000ULL) {
2652 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2656 val = (0xff000000) | scrq->hw_irq;
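/* A sub-CRQ interrupt may still be pending from before it was disabled;
 * issue an H_EOI for the source before re-enabling so the hypervisor can
 * present it again. The value appears to be an XICS XIRR: the 0xff priority
 * byte above the 24-bit interrupt source number (interpretation, not
 * confirmed by this file).
 */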
2657 rc = plpar_hcall_norets(H_EOI, val);
2659 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2660 val, rc);
2662 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2663 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2665 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2666 scrq->hw_irq, rc);
2670 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2671 struct ibmvnic_sub_crq_queue *scrq)
2673 struct device *dev = &adapter->vdev->dev;
2674 struct ibmvnic_tx_pool *tx_pool;
2675 struct ibmvnic_tx_buff *txbuff;
2676 union sub_crq *next;
2682 while (pending_scrq(adapter, scrq)) {
2683 unsigned int pool = scrq->pool_index;
2684 int num_entries = 0;
2686 next = ibmvnic_next_scrq(adapter, scrq);
2687 for (i = 0; i < next->tx_comp.num_comps; i++) {
2688 if (next->tx_comp.rcs[i]) {
2689 dev_err(dev, "tx error %x\n",
2690 next->tx_comp.rcs[i]);
2693 index = be32_to_cpu(next->tx_comp.correlators[i]);
2694 if (index & IBMVNIC_TSO_POOL_MASK) {
2695 tx_pool = &adapter->tso_pool[pool];
2696 index &= ~IBMVNIC_TSO_POOL_MASK;
2698 tx_pool = &adapter->tx_pool[pool];
2701 txbuff = &tx_pool->tx_buff[index];
2703 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2704 if (!txbuff->data_dma[j])
2707 txbuff->data_dma[j] = 0;
2709 /* if sub_crq was sent indirectly */
2710 first = &txbuff->indir_arr[0].generic.first;
2711 if (*first == IBMVNIC_CRQ_CMD) {
2712 dma_unmap_single(dev, txbuff->indir_dma,
2713 sizeof(txbuff->indir_arr),
2714 DMA_TO_DEVICE);
2718 if (txbuff->last_frag) {
2719 dev_kfree_skb_any(txbuff->skb);
2723 num_entries += txbuff->num_entries;
2725 tx_pool->free_map[tx_pool->producer_index] = index;
2726 tx_pool->producer_index =
2727 (tx_pool->producer_index + 1) %
2728 tx_pool->num_buffers;
2730 /* remove tx_comp scrq */
2731 next->tx_comp.first = 0;
2733 if (atomic_sub_return(num_entries, &scrq->used) <=
2734 (adapter->req_tx_entries_per_subcrq / 2) &&
2735 __netif_subqueue_stopped(adapter->netdev,
2736 scrq->pool_index)) {
2737 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2738 netdev_dbg(adapter->netdev, "Started queue %d\n",
2739 scrq->pool_index);
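/* Re-enable the interrupt only after draining the queue, then re-check:
 * a completion that raced with the enable would otherwise be missed, so
 * disable again and reprocess with the interrupt masked.
 */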
2743 enable_scrq_irq(adapter, scrq);
2745 if (pending_scrq(adapter, scrq)) {
2746 disable_scrq_irq(adapter, scrq);
2753 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2755 struct ibmvnic_sub_crq_queue *scrq = instance;
2756 struct ibmvnic_adapter *adapter = scrq->adapter;
2758 disable_scrq_irq(adapter, scrq);
2759 ibmvnic_complete_tx(adapter, scrq);
2764 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2766 struct ibmvnic_sub_crq_queue *scrq = instance;
2767 struct ibmvnic_adapter *adapter = scrq->adapter;
2769 /* When booting a kdump kernel we can hit pending interrupts
2770 * prior to completing driver initialization.
2772 if (unlikely(adapter->state != VNIC_OPEN))
2775 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
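/* Standard NAPI hand-off: mask the sub-CRQ interrupt and let the poll
 * routine drain the queue; the interrupt is re-enabled from the poll routine
 * once the queue is empty.
 */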
2777 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2778 disable_scrq_irq(adapter, scrq);
2779 __napi_schedule(&adapter->napi[scrq->scrq_num]);
2785 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2787 struct device *dev = &adapter->vdev->dev;
2788 struct ibmvnic_sub_crq_queue *scrq;
2792 for (i = 0; i < adapter->req_tx_queues; i++) {
2793 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2795 scrq = adapter->tx_scrq[i];
2796 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2800 dev_err(dev, "Error mapping irq\n");
2801 goto req_tx_irq_failed;
2804 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2805 0, "ibmvnic_tx", scrq);
2808 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2809 scrq->irq, rc);
2810 irq_dispose_mapping(scrq->irq);
2811 goto req_tx_irq_failed;
2815 for (i = 0; i < adapter->req_rx_queues; i++) {
2816 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2818 scrq = adapter->rx_scrq[i];
2819 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2822 dev_err(dev, "Error mapping irq\n");
2823 goto req_rx_irq_failed;
2825 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2826 0, "ibmvnic_rx", scrq);
2828 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2829 scrq->irq, rc);
2830 irq_dispose_mapping(scrq->irq);
2831 goto req_rx_irq_failed;
2837 for (j = 0; j < i; j++) {
2838 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2839 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2841 i = adapter->req_tx_queues;
2843 for (j = 0; j < i; j++) {
2844 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2845 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2847 release_sub_crqs(adapter, 1);
2851 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2853 struct device *dev = &adapter->vdev->dev;
2854 struct ibmvnic_sub_crq_queue **allqueues;
2855 int registered_queues = 0;
2860 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2862 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2866 for (i = 0; i < total_queues; i++) {
2867 allqueues[i] = init_sub_crq_queue(adapter);
2868 if (!allqueues[i]) {
2869 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2872 registered_queues++;
2875 /* Make sure we were able to register the minimum number of queues */
2876 if (registered_queues <
2877 adapter->min_tx_queues + adapter->min_rx_queues) {
2878 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2882 /* Spread the shortfall from failed queue allocations across tx and rx */
2883 for (i = 0; i < total_queues - registered_queues + more; i++) {
2884 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2887 if (adapter->req_rx_queues > adapter->min_rx_queues)
2888 adapter->req_rx_queues--;
2893 if (adapter->req_tx_queues > adapter->min_tx_queues)
2894 adapter->req_tx_queues--;
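/* Assign the registered queues: the first req_tx_queues entries of
 * allqueues become tx sub-CRQs, the remainder rx sub-CRQs.
 */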
2901 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2902 sizeof(*adapter->tx_scrq), GFP_KERNEL);
2903 if (!adapter->tx_scrq)
2906 for (i = 0; i < adapter->req_tx_queues; i++) {
2907 adapter->tx_scrq[i] = allqueues[i];
2908 adapter->tx_scrq[i]->pool_index = i;
2909 adapter->num_active_tx_scrqs++;
2912 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2913 sizeof(*adapter->rx_scrq), GFP_KERNEL);
2914 if (!adapter->rx_scrq)
2917 for (i = 0; i < adapter->req_rx_queues; i++) {
2918 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2919 adapter->rx_scrq[i]->scrq_num = i;
2920 adapter->num_active_rx_scrqs++;
2927 kfree(adapter->tx_scrq);
2928 adapter->tx_scrq = NULL;
2930 for (i = 0; i < registered_queues; i++)
2931 release_sub_crq_queue(adapter, allqueues[i], 1);
2936 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2938 struct device *dev = &adapter->vdev->dev;
2939 union ibmvnic_crq crq;
2943 /* Sub-CRQ entries are 32 bytes long */
2944 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2946 if (adapter->min_tx_entries_per_subcrq > entries_page ||
2947 adapter->min_rx_add_entries_per_subcrq > entries_page) {
2948 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2952 if (adapter->desired.mtu)
2953 adapter->req_mtu = adapter->desired.mtu;
2955 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2957 if (!adapter->desired.tx_entries)
2958 adapter->desired.tx_entries =
2959 adapter->max_tx_entries_per_subcrq;
2960 if (!adapter->desired.rx_entries)
2961 adapter->desired.rx_entries =
2962 adapter->max_rx_add_entries_per_subcrq;
2964 max_entries = IBMVNIC_MAX_LTB_SIZE /
2965 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2967 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2968 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2969 adapter->desired.tx_entries = max_entries;
2972 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2973 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2974 adapter->desired.rx_entries = max_entries;
2977 if (adapter->desired.tx_entries)
2978 adapter->req_tx_entries_per_subcrq =
2979 adapter->desired.tx_entries;
2981 adapter->req_tx_entries_per_subcrq =
2982 adapter->max_tx_entries_per_subcrq;
2984 if (adapter->desired.rx_entries)
2985 adapter->req_rx_add_entries_per_subcrq =
2986 adapter->desired.rx_entries;
2988 adapter->req_rx_add_entries_per_subcrq =
2989 adapter->max_rx_add_entries_per_subcrq;
2991 if (adapter->desired.tx_queues)
2992 adapter->req_tx_queues =
2993 adapter->desired.tx_queues;
2995 adapter->req_tx_queues =
2996 adapter->opt_tx_comp_sub_queues;
2998 if (adapter->desired.rx_queues)
2999 adapter->req_rx_queues =
3000 adapter->desired.rx_queues;
3002 adapter->req_rx_queues =
3003 adapter->opt_rx_comp_queues;
3005 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3008 memset(&crq, 0, sizeof(crq));
3009 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3010 crq.request_capability.cmd = REQUEST_CAPABILITY;
3012 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3013 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3014 atomic_inc(&adapter->running_cap_crqs);
3015 ibmvnic_send_crq(adapter, &crq);
3017 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3018 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3019 atomic_inc(&adapter->running_cap_crqs);
3020 ibmvnic_send_crq(adapter, &crq);
3022 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3023 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3024 atomic_inc(&adapter->running_cap_crqs);
3025 ibmvnic_send_crq(adapter, &crq);
3027 crq.request_capability.capability =
3028 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3029 crq.request_capability.number =
3030 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3031 atomic_inc(&adapter->running_cap_crqs);
3032 ibmvnic_send_crq(adapter, &crq);
3034 crq.request_capability.capability =
3035 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3036 crq.request_capability.number =
3037 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3038 atomic_inc(&adapter->running_cap_crqs);
3039 ibmvnic_send_crq(adapter, &crq);
3041 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3042 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3043 atomic_inc(&adapter->running_cap_crqs);
3044 ibmvnic_send_crq(adapter, &crq);
3046 if (adapter->netdev->flags & IFF_PROMISC) {
3047 if (adapter->promisc_supported) {
3048 crq.request_capability.capability =
3049 cpu_to_be16(PROMISC_REQUESTED);
3050 crq.request_capability.number = cpu_to_be64(1);
3051 atomic_inc(&adapter->running_cap_crqs);
3052 ibmvnic_send_crq(adapter, &crq);
3055 crq.request_capability.capability =
3056 cpu_to_be16(PROMISC_REQUESTED);
3057 crq.request_capability.number = cpu_to_be64(0);
3058 atomic_inc(&adapter->running_cap_crqs);
3059 ibmvnic_send_crq(adapter, &crq);
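/* Each REQUEST_CAPABILITY CRQ above is acknowledged individually; the
 * responses are counted down in handle_request_cap_rsp(), which kicks off
 * the IP offload query once running_cap_crqs reaches zero.
 */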
3063 static int pending_scrq(struct ibmvnic_adapter *adapter,
3064 struct ibmvnic_sub_crq_queue *scrq)
3066 union sub_crq *entry = &scrq->msgs[scrq->cur];
3068 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3074 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3075 struct ibmvnic_sub_crq_queue *scrq)
3077 union sub_crq *entry;
3078 unsigned long flags;
3080 spin_lock_irqsave(&scrq->lock, flags);
3081 entry = &scrq->msgs[scrq->cur];
3082 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3083 if (++scrq->cur == scrq->size)
3088 spin_unlock_irqrestore(&scrq->lock, flags);
3093 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3095 struct ibmvnic_crq_queue *queue = &adapter->crq;
3096 union ibmvnic_crq *crq;
3098 crq = &queue->msgs[queue->cur];
3099 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3100 if (++queue->cur == queue->size)
3109 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3110 union sub_crq *sub_crq)
3112 unsigned int ua = adapter->vdev->unit_address;
3113 struct device *dev = &adapter->vdev->dev;
3114 u64 *u64_crq = (u64 *)sub_crq;
3117 netdev_dbg(adapter->netdev,
3118 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3119 (unsigned long int)cpu_to_be64(remote_handle),
3120 (unsigned long int)cpu_to_be64(u64_crq[0]),
3121 (unsigned long int)cpu_to_be64(u64_crq[1]),
3122 (unsigned long int)cpu_to_be64(u64_crq[2]),
3123 (unsigned long int)cpu_to_be64(u64_crq[3]));
3125 /* Make sure the hypervisor sees the complete request */
3126 mb();
3128 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3129 cpu_to_be64(remote_handle),
3130 cpu_to_be64(u64_crq[0]),
3131 cpu_to_be64(u64_crq[1]),
3132 cpu_to_be64(u64_crq[2]),
3133 cpu_to_be64(u64_crq[3]));
3137 dev_warn(dev, "CRQ Queue closed\n");
3138 dev_err(dev, "Send error (rc=%d)\n", rc);
3144 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3145 u64 remote_handle, u64 ioba, u64 num_entries)
3147 unsigned int ua = adapter->vdev->unit_address;
3148 struct device *dev = &adapter->vdev->dev;
3151 /* Make sure the hypervisor sees the complete request */
3152 mb();
3153 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3154 cpu_to_be64(remote_handle),
3155 ioba, num_entries);
3159 dev_warn(dev, "CRQ Queue closed\n");
3160 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
3166 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3167 union ibmvnic_crq *crq)
3169 unsigned int ua = adapter->vdev->unit_address;
3170 struct device *dev = &adapter->vdev->dev;
3171 u64 *u64_crq = (u64 *)crq;
3174 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3175 (unsigned long int)cpu_to_be64(u64_crq[0]),
3176 (unsigned long int)cpu_to_be64(u64_crq[1]));
3178 if (!adapter->crq.active &&
3179 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3180 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3184 /* Make sure the hypervisor sees the complete request */
3185 mb();
3187 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3188 cpu_to_be64(u64_crq[0]),
3189 cpu_to_be64(u64_crq[1]));
3192 if (rc == H_CLOSED) {
3193 dev_warn(dev, "CRQ Queue closed\n");
3194 if (adapter->resetting)
3195 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3198 dev_warn(dev, "Send error (rc=%d)\n", rc);
3204 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3206 union ibmvnic_crq crq;
3208 memset(&crq, 0, sizeof(crq));
3209 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3210 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3211 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3213 return ibmvnic_send_crq(adapter, &crq);
3216 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3218 union ibmvnic_crq crq;
3220 memset(&crq, 0, sizeof(crq));
3221 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3222 crq.version_exchange.cmd = VERSION_EXCHANGE;
3223 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3225 return ibmvnic_send_crq(adapter, &crq);
3228 struct vnic_login_client_data {
3234 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3238 /* Calculate the amount of buffer space needed for the
3239 * vnic client data in the login buffer. There are four entries,
3240 * OS name, LPAR name, device name, and a null last entry.
3242 len = 4 * sizeof(struct vnic_login_client_data);
3243 len += 6; /* "Linux" plus NUL terminator */
3244 len += strlen(utsname()->nodename) + 1;
3245 len += strlen(adapter->netdev->name) + 1;
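/* For example (hypothetical names): with nodename "lpar1" and netdev name
 * "eth0" this is 4 descriptors plus 6 ("Linux\0") + 6 ("lpar1\0") +
 * 5 ("eth0\0") bytes of name data.
 */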
3250 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3251 struct vnic_login_client_data *vlcd)
3253 const char *os_name = "Linux";
3256 /* Type 1 - LPAR OS */
3258 len = strlen(os_name) + 1;
3259 vlcd->len = cpu_to_be16(len);
3260 strncpy(vlcd->name, os_name, len);
3261 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3263 /* Type 2 - LPAR name */
3265 len = strlen(utsname()->nodename) + 1;
3266 vlcd->len = cpu_to_be16(len);
3267 strncpy(vlcd->name, utsname()->nodename, len);
3268 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3270 /* Type 3 - device name */
3272 len = strlen(adapter->netdev->name) + 1;
3273 vlcd->len = cpu_to_be16(len);
3274 strncpy(vlcd->name, adapter->netdev->name, len);
3277 static int send_login(struct ibmvnic_adapter *adapter)
3279 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3280 struct ibmvnic_login_buffer *login_buffer;
3281 struct device *dev = &adapter->vdev->dev;
3282 dma_addr_t rsp_buffer_token;
3283 dma_addr_t buffer_token;
3284 size_t rsp_buffer_size;
3285 union ibmvnic_crq crq;
3289 int client_data_len;
3290 struct vnic_login_client_data *vlcd;
3293 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3294 netdev_err(adapter->netdev,
3295 "RX or TX queues are not allocated, device login failed\n");
3299 release_login_rsp_buffer(adapter);
3300 client_data_len = vnic_client_data_len(adapter);
3302 buffer_size =
3303 sizeof(struct ibmvnic_login_buffer) +
3304 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3305 client_data_len;
3307 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3308 if (!login_buffer)
3309 goto buf_alloc_failed;
3311 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3312 DMA_TO_DEVICE);
3313 if (dma_mapping_error(dev, buffer_token)) {
3314 dev_err(dev, "Couldn't map login buffer\n");
3315 goto buf_map_failed;
3318 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3319 sizeof(u64) * adapter->req_tx_queues +
3320 sizeof(u64) * adapter->req_rx_queues +
3321 sizeof(u64) * adapter->req_rx_queues +
3322 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3324 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3325 if (!login_rsp_buffer)
3326 goto buf_rsp_alloc_failed;
3328 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3329 rsp_buffer_size, DMA_FROM_DEVICE);
3330 if (dma_mapping_error(dev, rsp_buffer_token)) {
3331 dev_err(dev, "Couldn't map login rsp buffer\n");
3332 goto buf_rsp_map_failed;
3335 adapter->login_buf = login_buffer;
3336 adapter->login_buf_token = buffer_token;
3337 adapter->login_buf_sz = buffer_size;
3338 adapter->login_rsp_buf = login_rsp_buffer;
3339 adapter->login_rsp_buf_token = rsp_buffer_token;
3340 adapter->login_rsp_buf_sz = rsp_buffer_size;
3342 login_buffer->len = cpu_to_be32(buffer_size);
3343 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3344 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3345 login_buffer->off_txcomp_subcrqs =
3346 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3347 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3348 login_buffer->off_rxcomp_subcrqs =
3349 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3350 sizeof(u64) * adapter->req_tx_queues);
3351 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3352 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3354 tx_list_p = (__be64 *)((char *)login_buffer +
3355 sizeof(struct ibmvnic_login_buffer));
3356 rx_list_p = (__be64 *)((char *)login_buffer +
3357 sizeof(struct ibmvnic_login_buffer) +
3358 sizeof(u64) * adapter->req_tx_queues);
3360 for (i = 0; i < adapter->req_tx_queues; i++) {
3361 if (adapter->tx_scrq[i]) {
3362 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3363 crq_num);
3367 for (i = 0; i < adapter->req_rx_queues; i++) {
3368 if (adapter->rx_scrq[i]) {
3369 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3370 crq_num);
3374 /* Insert vNIC login client data */
3375 vlcd = (struct vnic_login_client_data *)
3376 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3377 login_buffer->client_data_offset =
3378 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3379 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3381 vnic_add_client_data(adapter, vlcd);
3383 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3384 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3385 netdev_dbg(adapter->netdev, "%016lx\n",
3386 ((unsigned long int *)(adapter->login_buf))[i]);
3389 memset(&crq, 0, sizeof(crq));
3390 crq.login.first = IBMVNIC_CRQ_CMD;
3391 crq.login.cmd = LOGIN;
3392 crq.login.ioba = cpu_to_be32(buffer_token);
3393 crq.login.len = cpu_to_be32(buffer_size);
3394 ibmvnic_send_crq(adapter, &crq);
3399 kfree(login_rsp_buffer);
3400 buf_rsp_alloc_failed:
3401 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3403 kfree(login_buffer);
3408 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3409 u32 len, u8 map_id)
3411 union ibmvnic_crq crq;
3413 memset(&crq, 0, sizeof(crq));
3414 crq.request_map.first = IBMVNIC_CRQ_CMD;
3415 crq.request_map.cmd = REQUEST_MAP;
3416 crq.request_map.map_id = map_id;
3417 crq.request_map.ioba = cpu_to_be32(addr);
3418 crq.request_map.len = cpu_to_be32(len);
3419 return ibmvnic_send_crq(adapter, &crq);
3422 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3424 union ibmvnic_crq crq;
3426 memset(&crq, 0, sizeof(crq));
3427 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3428 crq.request_unmap.cmd = REQUEST_UNMAP;
3429 crq.request_unmap.map_id = map_id;
3430 return ibmvnic_send_crq(adapter, &crq);
3433 static void send_map_query(struct ibmvnic_adapter *adapter)
3435 union ibmvnic_crq crq;
3437 memset(&crq, 0, sizeof(crq));
3438 crq.query_map.first = IBMVNIC_CRQ_CMD;
3439 crq.query_map.cmd = QUERY_MAP;
3440 ibmvnic_send_crq(adapter, &crq);
3443 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3444 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3446 union ibmvnic_crq crq;
3448 atomic_set(&adapter->running_cap_crqs, 0);
3449 memset(&crq, 0, sizeof(crq));
3450 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3451 crq.query_capability.cmd = QUERY_CAPABILITY;
3453 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3454 atomic_inc(&adapter->running_cap_crqs);
3455 ibmvnic_send_crq(adapter, &crq);
3457 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3458 atomic_inc(&adapter->running_cap_crqs);
3459 ibmvnic_send_crq(adapter, &crq);
3461 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3462 atomic_inc(&adapter->running_cap_crqs);
3463 ibmvnic_send_crq(adapter, &crq);
3465 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3466 atomic_inc(&adapter->running_cap_crqs);
3467 ibmvnic_send_crq(adapter, &crq);
3469 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3470 atomic_inc(&adapter->running_cap_crqs);
3471 ibmvnic_send_crq(adapter, &crq);
3473 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3474 atomic_inc(&adapter->running_cap_crqs);
3475 ibmvnic_send_crq(adapter, &crq);
3477 crq.query_capability.capability =
3478 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3479 atomic_inc(&adapter->running_cap_crqs);
3480 ibmvnic_send_crq(adapter, &crq);
3482 crq.query_capability.capability =
3483 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3484 atomic_inc(&adapter->running_cap_crqs);
3485 ibmvnic_send_crq(adapter, &crq);
3487 crq.query_capability.capability =
3488 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3489 atomic_inc(&adapter->running_cap_crqs);
3490 ibmvnic_send_crq(adapter, &crq);
3492 crq.query_capability.capability =
3493 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3494 atomic_inc(&adapter->running_cap_crqs);
3495 ibmvnic_send_crq(adapter, &crq);
3497 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3498 atomic_inc(&adapter->running_cap_crqs);
3499 ibmvnic_send_crq(adapter, &crq);
3501 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3502 atomic_inc(&adapter->running_cap_crqs);
3503 ibmvnic_send_crq(adapter, &crq);
3505 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3506 atomic_inc(&adapter->running_cap_crqs);
3507 ibmvnic_send_crq(adapter, &crq);
3509 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3510 atomic_inc(&adapter->running_cap_crqs);
3511 ibmvnic_send_crq(adapter, &crq);
3513 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3514 atomic_inc(&adapter->running_cap_crqs);
3515 ibmvnic_send_crq(adapter, &crq);
3517 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3518 atomic_inc(&adapter->running_cap_crqs);
3519 ibmvnic_send_crq(adapter, &crq);
3521 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3522 atomic_inc(&adapter->running_cap_crqs);
3523 ibmvnic_send_crq(adapter, &crq);
3525 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3526 atomic_inc(&adapter->running_cap_crqs);
3527 ibmvnic_send_crq(adapter, &crq);
3529 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3530 atomic_inc(&adapter->running_cap_crqs);
3531 ibmvnic_send_crq(adapter, &crq);
3533 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3534 atomic_inc(&adapter->running_cap_crqs);
3535 ibmvnic_send_crq(adapter, &crq);
3537 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3538 atomic_inc(&adapter->running_cap_crqs);
3539 ibmvnic_send_crq(adapter, &crq);
3541 crq.query_capability.capability =
3542 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3543 atomic_inc(&adapter->running_cap_crqs);
3544 ibmvnic_send_crq(adapter, &crq);
3546 crq.query_capability.capability =
3547 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3548 atomic_inc(&adapter->running_cap_crqs);
3549 ibmvnic_send_crq(adapter, &crq);
3551 crq.query_capability.capability =
3552 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3553 atomic_inc(&adapter->running_cap_crqs);
3554 ibmvnic_send_crq(adapter, &crq);
3556 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3557 atomic_inc(&adapter->running_cap_crqs);
3558 ibmvnic_send_crq(adapter, &crq);
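/* The QUERY_CAPABILITY responses are handled in handle_query_cap_rsp(),
 * which decrements running_cap_crqs and calls ibmvnic_send_req_caps() once
 * all queries have been answered.
 */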
3561 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3562 struct ibmvnic_adapter *adapter)
3564 struct device *dev = &adapter->vdev->dev;
3566 if (crq->get_vpd_size_rsp.rc.code) {
3567 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3568 crq->get_vpd_size_rsp.rc.code);
3569 complete(&adapter->fw_done);
3573 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3574 complete(&adapter->fw_done);
3577 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3578 struct ibmvnic_adapter *adapter)
3580 struct device *dev = &adapter->vdev->dev;
3581 unsigned char *substr = NULL;
3582 u8 fw_level_len = 0;
3584 memset(adapter->fw_version, 0, 32);
3586 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3589 if (crq->get_vpd_rsp.rc.code) {
3590 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3591 crq->get_vpd_rsp.rc.code);
3595 /* get the position of the firmware version info
3596 * located after the ASCII 'RM' substring in the buffer
3598 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3599 if (!substr) {
3600 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3604 /* get length of firmware level ASCII substring */
3605 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3606 fw_level_len = *(substr + 2);
3607 } else {
3608 dev_info(dev, "FW level length field extends past the end of the VPD buffer\n");
3612 /* copy firmware version string from vpd into adapter */
3613 if ((substr + 3 + fw_level_len) <
3614 (adapter->vpd->buff + adapter->vpd->len)) {
3615 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3616 } else {
3617 dev_info(dev, "FW level substring extends past the end of the VPD buffer\n");
3621 if (adapter->fw_version[0] == '\0')
3622 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3623 complete(&adapter->fw_done);
3626 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3628 struct device *dev = &adapter->vdev->dev;
3629 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3630 union ibmvnic_crq crq;
3633 dma_unmap_single(dev, adapter->ip_offload_tok,
3634 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3636 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3637 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3638 netdev_dbg(adapter->netdev, "%016lx\n",
3639 ((unsigned long int *)(buf))[i]);
3641 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3642 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3643 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3644 buf->tcp_ipv4_chksum);
3645 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3646 buf->tcp_ipv6_chksum);
3647 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3648 buf->udp_ipv4_chksum);
3649 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3650 buf->udp_ipv6_chksum);
3651 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3652 buf->large_tx_ipv4);
3653 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3654 buf->large_tx_ipv6);
3655 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3656 buf->large_rx_ipv4);
3657 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3658 buf->large_rx_ipv6);
3659 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3660 buf->max_ipv4_header_size);
3661 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3662 buf->max_ipv6_header_size);
3663 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3664 buf->max_tcp_header_size);
3665 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3666 buf->max_udp_header_size);
3667 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3668 buf->max_large_tx_size);
3669 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3670 buf->max_large_rx_size);
3671 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3672 buf->ipv6_extension_header);
3673 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3674 buf->tcp_pseudosum_req);
3675 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3676 buf->num_ipv6_ext_headers);
3677 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3678 buf->off_ipv6_ext_headers);
3680 adapter->ip_offload_ctrl_tok =
3681 dma_map_single(dev, &adapter->ip_offload_ctrl,
3682 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3684 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3685 dev_err(dev, "Couldn't map ip offload control buffer\n");
3689 adapter->ip_offload_ctrl.len =
3690 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3691 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3692 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3693 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3694 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3695 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3696 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3697 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3698 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3699 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3701 /* large_rx disabled for now, additional features needed */
3702 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3703 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3705 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3707 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3708 adapter->netdev->features |= NETIF_F_IP_CSUM;
3710 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3711 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3713 if ((adapter->netdev->features &
3714 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3715 adapter->netdev->features |= NETIF_F_RXCSUM;
3717 if (buf->large_tx_ipv4)
3718 adapter->netdev->features |= NETIF_F_TSO;
3719 if (buf->large_tx_ipv6)
3720 adapter->netdev->features |= NETIF_F_TSO6;
3722 adapter->netdev->hw_features |= adapter->netdev->features;
3724 memset(&crq, 0, sizeof(crq));
3725 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3726 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3727 crq.control_ip_offload.len =
3728 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3729 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3730 ibmvnic_send_crq(adapter, &crq);
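/* The CONTROL_IP_OFFLOAD response is handled in ibmvnic_handle_crq(), which
 * unmaps the control buffer and completes init_done.
 */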
3733 static void handle_error_info_rsp(union ibmvnic_crq *crq,
3734 struct ibmvnic_adapter *adapter)
3736 struct device *dev = &adapter->vdev->dev;
3737 struct ibmvnic_error_buff *error_buff, *tmp;
3738 unsigned long flags;
3742 if (!crq->request_error_rsp.rc.code) {
3743 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
3744 crq->request_error_rsp.rc.code);
3748 spin_lock_irqsave(&adapter->error_list_lock, flags);
3749 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3750 if (error_buff->error_id == crq->request_error_rsp.error_id) {
3752 list_del(&error_buff->list);
3755 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3758 dev_err(dev, "Couldn't find error id %x\n",
3759 be32_to_cpu(crq->request_error_rsp.error_id));
3763 dev_err(dev, "Detailed info for error id %x:",
3764 be32_to_cpu(crq->request_error_rsp.error_id));
3766 for (i = 0; i < error_buff->len; i++) {
3767 pr_cont("%02x", (int)error_buff->buff[i]);
3773 dma_unmap_single(dev, error_buff->dma, error_buff->len,
3775 kfree(error_buff->buff);
3779 static void request_error_information(struct ibmvnic_adapter *adapter,
3780 union ibmvnic_crq *err_crq)
3782 struct device *dev = &adapter->vdev->dev;
3783 struct net_device *netdev = adapter->netdev;
3784 struct ibmvnic_error_buff *error_buff;
3785 unsigned long timeout = msecs_to_jiffies(30000);
3786 union ibmvnic_crq crq;
3787 unsigned long flags;
3790 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3794 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3795 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3796 if (!error_buff->buff) {
3801 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3802 DMA_FROM_DEVICE);
3803 if (dma_mapping_error(dev, error_buff->dma)) {
3804 netdev_err(netdev, "Couldn't map error buffer\n");
3805 kfree(error_buff->buff);
3810 error_buff->len = detail_len;
3811 error_buff->error_id = err_crq->error_indication.error_id;
3813 spin_lock_irqsave(&adapter->error_list_lock, flags);
3814 list_add_tail(&error_buff->list, &adapter->errors);
3815 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3817 memset(&crq, 0, sizeof(crq));
3818 crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3819 crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3820 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3821 crq.request_error_info.len = cpu_to_be32(detail_len);
3822 crq.request_error_info.error_id = err_crq->error_indication.error_id;
3824 rc = ibmvnic_send_crq(adapter, &crq);
3826 netdev_err(netdev, "failed to request error information\n");
3830 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3831 netdev_err(netdev, "timeout waiting for error information\n");
3838 spin_lock_irqsave(&adapter->error_list_lock, flags);
3839 list_del(&error_buff->list);
3840 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3842 kfree(error_buff->buff);
3846 static void handle_error_indication(union ibmvnic_crq *crq,
3847 struct ibmvnic_adapter *adapter)
3849 struct device *dev = &adapter->vdev->dev;
3851 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3852 crq->error_indication.flags
3853 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3854 be32_to_cpu(crq->error_indication.error_id),
3855 be16_to_cpu(crq->error_indication.error_cause));
3857 if (be32_to_cpu(crq->error_indication.error_id))
3858 request_error_information(adapter, crq);
3860 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3861 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3863 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3866 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3867 struct ibmvnic_adapter *adapter)
3869 struct net_device *netdev = adapter->netdev;
3870 struct device *dev = &adapter->vdev->dev;
3873 rc = crq->change_mac_addr_rsp.rc.code;
3875 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3878 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3879 ETH_ALEN);
3881 complete(&adapter->fw_done);
3885 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3886 struct ibmvnic_adapter *adapter)
3888 struct device *dev = &adapter->vdev->dev;
3892 atomic_dec(&adapter->running_cap_crqs);
3893 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3895 req_value = &adapter->req_tx_queues;
3899 req_value = &adapter->req_rx_queues;
3902 case REQ_RX_ADD_QUEUES:
3903 req_value = &adapter->req_rx_add_queues;
3906 case REQ_TX_ENTRIES_PER_SUBCRQ:
3907 req_value = &adapter->req_tx_entries_per_subcrq;
3908 name = "tx_entries_per_subcrq";
3910 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3911 req_value = &adapter->req_rx_add_entries_per_subcrq;
3912 name = "rx_add_entries_per_subcrq";
3915 req_value = &adapter->req_mtu;
3918 case PROMISC_REQUESTED:
3919 req_value = &adapter->promisc;
3923 dev_err(dev, "Got invalid cap request rsp %d\n",
3924 crq->request_capability.capability);
3928 switch (crq->request_capability_rsp.rc.code) {
3931 case PARTIALSUCCESS:
3932 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3933 *req_value,
3934 (long int)be64_to_cpu(crq->request_capability_rsp.
3935 number), name);
3937 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3939 pr_err("mtu of %llu is not supported. Reverting.\n",
3940 *req_value);
3941 *req_value = adapter->fallback.mtu;
3944 be64_to_cpu(crq->request_capability_rsp.number);
3947 ibmvnic_send_req_caps(adapter, 1);
3950 dev_err(dev, "Error %d in request cap rsp\n",
3951 crq->request_capability_rsp.rc.code);
3955 /* Done receiving requested capabilities, query IP offload support */
3956 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3957 union ibmvnic_crq newcrq;
3958 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3959 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3960 &adapter->ip_offload_buf;
3962 adapter->wait_capability = false;
3963 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3964 buf_sz,
3965 DMA_FROM_DEVICE);
3967 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3968 if (!firmware_has_feature(FW_FEATURE_CMO))
3969 dev_err(dev, "Couldn't map offload buffer\n");
3973 memset(&newcrq, 0, sizeof(newcrq));
3974 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3975 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3976 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3977 newcrq.query_ip_offload.ioba =
3978 cpu_to_be32(adapter->ip_offload_tok);
3980 ibmvnic_send_crq(adapter, &newcrq);
3984 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3985 struct ibmvnic_adapter *adapter)
3987 struct device *dev = &adapter->vdev->dev;
3988 struct net_device *netdev = adapter->netdev;
3989 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3990 struct ibmvnic_login_buffer *login = adapter->login_buf;
3993 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3995 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3996 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
3998 /* If the number of queues requested can't be allocated by the
3999 * server, the login response will return with code 1. We will need
4000 * to resend the login buffer with fewer queues requested.
4002 if (login_rsp_crq->generic.rc.code) {
4003 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4004 complete(&adapter->init_done);
4008 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4010 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4011 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4012 netdev_dbg(adapter->netdev, "%016lx\n",
4013 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4017 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4018 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4019 adapter->req_rx_add_queues !=
4020 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4021 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4022 ibmvnic_remove(adapter->vdev);
4025 release_login_buffer(adapter);
4026 complete(&adapter->init_done);
4031 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4032 struct ibmvnic_adapter *adapter)
4034 struct device *dev = &adapter->vdev->dev;
4037 rc = crq->request_unmap_rsp.rc.code;
4039 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4042 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4043 struct ibmvnic_adapter *adapter)
4045 struct net_device *netdev = adapter->netdev;
4046 struct device *dev = &adapter->vdev->dev;
4049 rc = crq->query_map_rsp.rc.code;
4051 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4054 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4055 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4056 crq->query_map_rsp.free_pages);
4059 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4060 struct ibmvnic_adapter *adapter)
4062 struct net_device *netdev = adapter->netdev;
4063 struct device *dev = &adapter->vdev->dev;
4066 atomic_dec(&adapter->running_cap_crqs);
4067 netdev_dbg(netdev, "Outstanding queries: %d\n",
4068 atomic_read(&adapter->running_cap_crqs));
4069 rc = crq->query_capability.rc.code;
4071 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4075 switch (be16_to_cpu(crq->query_capability.capability)) {
4077 adapter->min_tx_queues =
4078 be64_to_cpu(crq->query_capability.number);
4079 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4080 adapter->min_tx_queues);
4083 adapter->min_rx_queues =
4084 be64_to_cpu(crq->query_capability.number);
4085 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4086 adapter->min_rx_queues);
4088 case MIN_RX_ADD_QUEUES:
4089 adapter->min_rx_add_queues =
4090 be64_to_cpu(crq->query_capability.number);
4091 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4092 adapter->min_rx_add_queues);
4095 adapter->max_tx_queues =
4096 be64_to_cpu(crq->query_capability.number);
4097 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4098 adapter->max_tx_queues);
4101 adapter->max_rx_queues =
4102 be64_to_cpu(crq->query_capability.number);
4103 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4104 adapter->max_rx_queues);
4106 case MAX_RX_ADD_QUEUES:
4107 adapter->max_rx_add_queues =
4108 be64_to_cpu(crq->query_capability.number);
4109 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4110 adapter->max_rx_add_queues);
4112 case MIN_TX_ENTRIES_PER_SUBCRQ:
4113 adapter->min_tx_entries_per_subcrq =
4114 be64_to_cpu(crq->query_capability.number);
4115 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4116 adapter->min_tx_entries_per_subcrq);
4118 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4119 adapter->min_rx_add_entries_per_subcrq =
4120 be64_to_cpu(crq->query_capability.number);
4121 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4122 adapter->min_rx_add_entries_per_subcrq);
4124 case MAX_TX_ENTRIES_PER_SUBCRQ:
4125 adapter->max_tx_entries_per_subcrq =
4126 be64_to_cpu(crq->query_capability.number);
4127 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4128 adapter->max_tx_entries_per_subcrq);
4130 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4131 adapter->max_rx_add_entries_per_subcrq =
4132 be64_to_cpu(crq->query_capability.number);
4133 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4134 adapter->max_rx_add_entries_per_subcrq);
4136 case TCP_IP_OFFLOAD:
4137 adapter->tcp_ip_offload =
4138 be64_to_cpu(crq->query_capability.number);
4139 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4140 adapter->tcp_ip_offload);
4142 case PROMISC_SUPPORTED:
4143 adapter->promisc_supported =
4144 be64_to_cpu(crq->query_capability.number);
4145 netdev_dbg(netdev, "promisc_supported = %lld\n",
4146 adapter->promisc_supported);
4149 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4150 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4151 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4154 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4155 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4156 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4158 case MAX_MULTICAST_FILTERS:
4159 adapter->max_multicast_filters =
4160 be64_to_cpu(crq->query_capability.number);
4161 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4162 adapter->max_multicast_filters);
4164 case VLAN_HEADER_INSERTION:
4165 adapter->vlan_header_insertion =
4166 be64_to_cpu(crq->query_capability.number);
4167 if (adapter->vlan_header_insertion)
4168 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4169 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4170 adapter->vlan_header_insertion);
4172 case RX_VLAN_HEADER_INSERTION:
4173 adapter->rx_vlan_header_insertion =
4174 be64_to_cpu(crq->query_capability.number);
4175 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4176 adapter->rx_vlan_header_insertion);
4178 case MAX_TX_SG_ENTRIES:
4179 adapter->max_tx_sg_entries =
4180 be64_to_cpu(crq->query_capability.number);
4181 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4182 adapter->max_tx_sg_entries);
4184 case RX_SG_SUPPORTED:
4185 adapter->rx_sg_supported =
4186 be64_to_cpu(crq->query_capability.number);
4187 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4188 adapter->rx_sg_supported);
4190 case OPT_TX_COMP_SUB_QUEUES:
4191 adapter->opt_tx_comp_sub_queues =
4192 be64_to_cpu(crq->query_capability.number);
4193 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4194 adapter->opt_tx_comp_sub_queues);
4196 case OPT_RX_COMP_QUEUES:
4197 adapter->opt_rx_comp_queues =
4198 be64_to_cpu(crq->query_capability.number);
4199 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4200 adapter->opt_rx_comp_queues);
4202 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4203 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4204 be64_to_cpu(crq->query_capability.number);
4205 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4206 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4208 case OPT_TX_ENTRIES_PER_SUBCRQ:
4209 adapter->opt_tx_entries_per_subcrq =
4210 be64_to_cpu(crq->query_capability.number);
4211 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4212 adapter->opt_tx_entries_per_subcrq);
4214 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4215 adapter->opt_rxba_entries_per_subcrq =
4216 be64_to_cpu(crq->query_capability.number);
4217 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4218 adapter->opt_rxba_entries_per_subcrq);
4220 case TX_RX_DESC_REQ:
4221 adapter->tx_rx_desc_req = crq->query_capability.number;
4222 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4223 adapter->tx_rx_desc_req);
4227 netdev_err(netdev, "Got invalid cap rsp %d\n",
4228 crq->query_capability.capability);
4232 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4233 adapter->wait_capability = false;
4234 ibmvnic_send_req_caps(adapter, 0);
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
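
/* Top-half interrupt handler for the main CRQ: defer all processing
 * to the tasklet so that hard irq context stays short.
 */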
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
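
/* Bottom-half CRQ processing: drain every valid message off the CRQ
 * and hand each one to ibmvnic_handle_crq() under the queue lock.
 */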
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
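
/* Ask the hypervisor to re-enable a disabled CRQ, retrying while the
 * operation is reported busy or in progress.
 */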
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
	return rc;
}
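
/* Free and re-register the CRQ with the hypervisor, clearing out any
 * stale messages, e.g. after a partition migration or a failed
 * registration.
 */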
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
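
/* Tear down the main CRQ: release the irq and tasklet, free the queue
 * with the hypervisor, and unmap and free the message page.
 */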
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
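
/* Allocate and DMA-map a page of CRQ messages, register it with the
 * hypervisor, and wire up the interrupt and tasklet that service it.
 */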
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;
	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
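
/* Re-drive the CRQ initialization handshake during a reset. Sub-CRQs
 * are reset in place where possible and reallocated when the
 * requested queue counts have changed.
 */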
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
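
/* First-time CRQ initialization handshake with the VNIC server,
 * followed by allocation of the sub-CRQs and their interrupts.
 */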
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
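
/* Probe routine for the vio device: allocate the net_device, set up
 * adapter state and the CRQ, negotiate capabilities via
 * ibmvnic_init(), and register the netdev with the kernel.
 */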
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
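
/* Remove routine: unregister the netdev and release the CRQ, sub-CRQ,
 * and statistics resources in the reverse order of allocation.
 */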
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
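
/* sysfs "failover" attribute: writing "1" retrieves the session token
 * from the hypervisor and signals a session error to initiate a
 * client-driven failover to the backing device.
 */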
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}
	return count;
}
static DEVICE_ATTR_WO(failover);
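
/* Report the IO entitlement this adapter wants: the CRQ page, the
 * statistics buffer, the sub-CRQ message queues, and every long term
 * mapped rx buffer pool.
 */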
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
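
/* PM resume hook: kick the CRQ tasklet so that any messages that
 * arrived while the partition was suspended get processed.
 */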
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);
	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);