1 /**************************************************************************/
3 /* IBM System i and System p Virtual NIC Device Driver */
4 /* Copyright (C) 2014 IBM Corp. */
5 /* Santiago Leon (santi_leon@yahoo.com) */
6 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7 /* John Allen (jallen@linux.vnet.ibm.com) */
9 /* This program is free software; you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation; either version 2 of the License, or */
12 /* (at your option) any later version. */
14 /* This program is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
19 /* You should have received a copy of the GNU General Public License */
20 /* along with this program. */
22 /* This module contains the implementation of a virtual ethernet device */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24 /* option of the RS/6000 Platform Architecture to interface with virtual */
25 /* ethernet NICs that are presented to the partition by the hypervisor. */
27 /* Messages are passed between the VNIC driver and the VNIC server using */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31 /* are used by the driver to notify the server that a packet is */
32 /* ready for transmission or that a buffer has been added to receive a */
33 /* packet. Subsequently, sCRQs are used by the server to notify the */
34 /* driver that a packet transmission has been completed or that a packet */
35 /* has been received and placed in a waiting buffer. */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit */
39 /* or receive has been completed, the VNIC driver is required to use */
40 /* "long term mapping". This entails that large, continuous DMA mapped */
41 /* buffers are allocated on driver initialization and these buffers are */
42 /* then continuously reused to pass skbs to and from the VNIC server. */
44 /**************************************************************************/
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
#include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
65 #include <linux/ipv6.h>
66 #include <linux/irq.h>
67 #include <linux/kthread.h>
68 #include <linux/seq_file.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
#include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
77 #include <linux/workqueue.h>
78 #include <linux/if_vlan.h>
79 #include <linux/utsname.h>
#include "ibmvnic.h"
83 static const char ibmvnic_driver_name[] = "ibmvnic";
84 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
86 MODULE_AUTHOR("Santiago Leon");
87 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
91 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
92 static int ibmvnic_remove(struct vio_dev *);
93 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
94 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
96 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
97 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
98 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
99 union sub_crq *sub_crq);
100 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
101 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
102 static int enable_scrq_irq(struct ibmvnic_adapter *,
103 struct ibmvnic_sub_crq_queue *);
104 static int disable_scrq_irq(struct ibmvnic_adapter *,
105 struct ibmvnic_sub_crq_queue *);
106 static int pending_scrq(struct ibmvnic_adapter *,
107 struct ibmvnic_sub_crq_queue *);
108 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
109 struct ibmvnic_sub_crq_queue *);
110 static int ibmvnic_poll(struct napi_struct *napi, int budget);
111 static void send_map_query(struct ibmvnic_adapter *adapter);
112 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113 static void send_request_unmap(struct ibmvnic_adapter *, u8);
114 static int send_login(struct ibmvnic_adapter *adapter);
115 static void send_cap_queries(struct ibmvnic_adapter *adapter);
116 static int init_sub_crqs(struct ibmvnic_adapter *);
117 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
118 static int ibmvnic_init(struct ibmvnic_adapter *);
119 static void release_crq_queue(struct ibmvnic_adapter *);
120 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
122 struct ibmvnic_stat {
123 char name[ETH_GSTRING_LEN];
int offset;
};
127 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
128 offsetof(struct ibmvnic_statistics, stat))
129 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
131 static const struct ibmvnic_stat ibmvnic_stats[] = {
132 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
133 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
134 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
135 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
136 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
137 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
138 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
139 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
140 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
141 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
142 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
143 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
144 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
145 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
146 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
147 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
148 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
149 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
150 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
151 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
152 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
153 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
156 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
157 unsigned long length, unsigned long *number,
unsigned long *irq)
160 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
163 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
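/**
 * alloc_long_term_buff - allocate and register a long term DMA buffer
 * @adapter - ibmvnic adapter
 * @ltb - long term buffer tracking structure to fill in
 * @size - number of bytes to allocate
 *
 * Allocates a coherent DMA buffer, then registers it with the VNIC
 * server through send_request_map and waits on fw_done for the
 * server's response. Returns the firmware rc on mapping failure.
 */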
170 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
171 struct ibmvnic_long_term_buff *ltb, int size)
173 struct device *dev = &adapter->vdev->dev;
176 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
180 dev_err(dev, "Couldn't alloc long term buffer\n");
183 ltb->map_id = adapter->map_id;
186 init_completion(&adapter->fw_done);
187 send_request_map(adapter, ltb->addr,
188 ltb->size, ltb->map_id);
189 wait_for_completion(&adapter->fw_done);
191 if (adapter->fw_done_rc) {
192 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
193 adapter->fw_done_rc);
199 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
200 struct ibmvnic_long_term_buff *ltb)
202 struct device *dev = &adapter->vdev->dev;
207 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
208 adapter->reset_reason != VNIC_RESET_MOBILITY)
209 send_request_unmap(adapter, ltb->map_id);
210 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
213 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
214 struct ibmvnic_long_term_buff *ltb)
216 memset(ltb->buff, 0, ltb->size);
218 init_completion(&adapter->fw_done);
219 send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
220 wait_for_completion(&adapter->fw_done);
222 if (adapter->fw_done_rc) {
223 dev_info(&adapter->vdev->dev,
224 "Reset failed, attempting to free and reallocate buffer\n");
225 free_long_term_buff(adapter, ltb);
226 return alloc_long_term_buff(adapter, ltb, ltb->size);
231 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
235 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
i++)
237 adapter->rx_pool[i].active = 0;
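/**
 * replenish_rx_pool - post free receive buffers to the VNIC server
 * @adapter - ibmvnic adapter
 * @pool - rx pool to replenish
 *
 * Pairs each free slot with a fresh skb and a region of the pool's
 * long term mapped buffer, then hands the region to firmware with an
 * rx_add sub-CRQ descriptor. On H_CLOSED the rx pools are deactivated
 * and carrier is turned off; firmware will signal a reset.
 */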
240 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
241 struct ibmvnic_rx_pool *pool)
243 int count = pool->size - atomic_read(&pool->available);
244 struct device *dev = &adapter->vdev->dev;
245 int buffers_added = 0;
246 unsigned long lpar_rc;
247 union sub_crq sub_crq;
260 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261 be32_to_cpu(adapter->login_rsp_buf->
off_rxadd_subcrqs));
264 for (i = 0; i < count; ++i) {
265 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
267 dev_err(dev, "Couldn't replenish rx buff\n");
268 adapter->replenish_no_mem++;
272 index = pool->free_map[pool->next_free];
274 if (pool->rx_buff[index].skb)
275 dev_err(dev, "Inconsistent free_map!\n");
277 /* Copy the skb to the long term mapped DMA buffer */
278 offset = index * pool->buff_size;
279 dst = pool->long_term_buff.buff + offset;
280 memset(dst, 0, pool->buff_size);
281 dma_addr = pool->long_term_buff.addr + offset;
282 pool->rx_buff[index].data = dst;
284 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285 pool->rx_buff[index].dma = dma_addr;
286 pool->rx_buff[index].skb = skb;
287 pool->rx_buff[index].pool_index = pool->index;
288 pool->rx_buff[index].size = pool->buff_size;
290 memset(&sub_crq, 0, sizeof(sub_crq));
291 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292 sub_crq.rx_add.correlator =
293 cpu_to_be64((u64)&pool->rx_buff[index]);
294 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
297 /* The length field of the sCRQ is defined to be 24 bits so the
298 * buffer size needs to be left shifted by a byte before it is
299 * converted to big endian to prevent the last byte from being
* parsed as the length.
*/
302 #ifdef __LITTLE_ENDIAN__
shift = 8;
#endif
305 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
307 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
309 if (lpar_rc != H_SUCCESS)
313 adapter->replenish_add_buff_success++;
314 pool->next_free = (pool->next_free + 1) % pool->size;
316 atomic_add(buffers_added, &pool->available);
320 dev_info(dev, "replenish pools failure\n");
321 pool->free_map[pool->next_free] = index;
322 pool->rx_buff[index].skb = NULL;
323 if (!dma_mapping_error(dev, dma_addr))
324 dma_unmap_single(dev, dma_addr, pool->buff_size,
327 dev_kfree_skb_any(skb);
328 adapter->replenish_add_buff_failure++;
329 atomic_add(buffers_added, &pool->available);
331 if (lpar_rc == H_CLOSED) {
332 /* Disable buffer pool replenishment and report carrier off if
333 * queue is closed. Firmware guarantees that a signal will
334 * be sent to the driver, triggering a reset.
336 deactivate_rx_pools(adapter);
337 netif_carrier_off(adapter->netdev);
341 static void replenish_pools(struct ibmvnic_adapter *adapter)
345 adapter->replenish_task_cycles++;
346 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
i++)
348 if (adapter->rx_pool[i].active)
349 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
353 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
355 kfree(adapter->tx_stats_buffers);
356 kfree(adapter->rx_stats_buffers);
357 adapter->tx_stats_buffers = NULL;
358 adapter->rx_stats_buffers = NULL;
361 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
363 adapter->tx_stats_buffers =
364 kcalloc(IBMVNIC_MAX_QUEUES,
365 sizeof(struct ibmvnic_tx_queue_stats),
367 if (!adapter->tx_stats_buffers)
370 adapter->rx_stats_buffers =
371 kcalloc(IBMVNIC_MAX_QUEUES,
372 sizeof(struct ibmvnic_rx_queue_stats),
374 if (!adapter->rx_stats_buffers)
380 static void release_stats_token(struct ibmvnic_adapter *adapter)
382 struct device *dev = &adapter->vdev->dev;
384 if (!adapter->stats_token)
387 dma_unmap_single(dev, adapter->stats_token,
388 sizeof(struct ibmvnic_statistics),
390 adapter->stats_token = 0;
393 static int init_stats_token(struct ibmvnic_adapter *adapter)
395 struct device *dev = &adapter->vdev->dev;
398 stok = dma_map_single(dev, &adapter->stats,
399 sizeof(struct ibmvnic_statistics),
401 if (dma_mapping_error(dev, stok)) {
402 dev_err(dev, "Couldn't map stats buffer\n");
406 adapter->stats_token = stok;
407 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
411 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
413 struct ibmvnic_rx_pool *rx_pool;
418 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
419 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
421 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
422 for (i = 0; i < rx_scrqs; i++) {
423 rx_pool = &adapter->rx_pool[i];
425 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
427 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
428 free_long_term_buff(adapter, &rx_pool->long_term_buff);
429 rx_pool->buff_size = be64_to_cpu(size_array[i]);
430 alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
434 rc = reset_long_term_buff(adapter,
435 &rx_pool->long_term_buff);
441 for (j = 0; j < rx_pool->size; j++)
442 rx_pool->free_map[j] = j;
444 memset(rx_pool->rx_buff, 0,
445 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
447 atomic_set(&rx_pool->available, 0);
448 rx_pool->next_alloc = 0;
449 rx_pool->next_free = 0;
456 static void release_rx_pools(struct ibmvnic_adapter *adapter)
458 struct ibmvnic_rx_pool *rx_pool;
461 if (!adapter->rx_pool)
464 for (i = 0; i < adapter->num_active_rx_pools; i++) {
465 rx_pool = &adapter->rx_pool[i];
467 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
469 kfree(rx_pool->free_map);
470 free_long_term_buff(adapter, &rx_pool->long_term_buff);
472 if (!rx_pool->rx_buff)
475 for (j = 0; j < rx_pool->size; j++) {
476 if (rx_pool->rx_buff[j].skb) {
477 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
478 rx_pool->rx_buff[j].skb = NULL;
482 kfree(rx_pool->rx_buff);
485 kfree(adapter->rx_pool);
486 adapter->rx_pool = NULL;
487 adapter->num_active_rx_pools = 0;
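/**
 * init_rx_pools - allocate rx pools based on the login response
 * @netdev - net device
 *
 * Creates one rx pool per rx-add sub-CRQ. Each pool gets a free map,
 * an array of buffer descriptors, and a long term mapped DMA buffer
 * large enough for req_rx_add_entries_per_subcrq buffers.
 */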
490 static int init_rx_pools(struct net_device *netdev)
492 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
493 struct device *dev = &adapter->vdev->dev;
494 struct ibmvnic_rx_pool *rx_pool;
rxadd_subcrqs =
500 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
501 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
502 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
504 adapter->rx_pool = kcalloc(rxadd_subcrqs,
505 sizeof(struct ibmvnic_rx_pool),
507 if (!adapter->rx_pool) {
508 dev_err(dev, "Failed to allocate rx pools\n");
512 adapter->num_active_rx_pools = rxadd_subcrqs;
514 for (i = 0; i < rxadd_subcrqs; i++) {
515 rx_pool = &adapter->rx_pool[i];
517 netdev_dbg(adapter->netdev,
518 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
519 i, adapter->req_rx_add_entries_per_subcrq,
520 be64_to_cpu(size_array[i]));
522 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
524 rx_pool->buff_size = be64_to_cpu(size_array[i]);
527 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
529 if (!rx_pool->free_map) {
530 release_rx_pools(adapter);
534 rx_pool->rx_buff = kcalloc(rx_pool->size,
535 sizeof(struct ibmvnic_rx_buff),
537 if (!rx_pool->rx_buff) {
538 dev_err(dev, "Couldn't alloc rx buffers\n");
539 release_rx_pools(adapter);
543 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
544 rx_pool->size * rx_pool->buff_size)) {
545 release_rx_pools(adapter);
549 for (j = 0; j < rx_pool->size; ++j)
550 rx_pool->free_map[j] = j;
552 atomic_set(&rx_pool->available, 0);
553 rx_pool->next_alloc = 0;
554 rx_pool->next_free = 0;
560 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
562 struct ibmvnic_tx_pool *tx_pool;
566 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
567 for (i = 0; i < tx_scrqs; i++) {
568 netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);
570 tx_pool = &adapter->tx_pool[i];
572 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
576 rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
580 memset(tx_pool->tx_buff, 0,
581 adapter->req_tx_entries_per_subcrq *
582 sizeof(struct ibmvnic_tx_buff));
584 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
585 tx_pool->free_map[j] = j;
587 tx_pool->consumer_index = 0;
588 tx_pool->producer_index = 0;
589 tx_pool->tso_index = 0;
595 static void release_vpd_data(struct ibmvnic_adapter *adapter)
600 kfree(adapter->vpd->buff);
606 static void release_tx_pools(struct ibmvnic_adapter *adapter)
608 struct ibmvnic_tx_pool *tx_pool;
611 if (!adapter->tx_pool)
614 for (i = 0; i < adapter->num_active_tx_pools; i++) {
615 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
616 tx_pool = &adapter->tx_pool[i];
617 kfree(tx_pool->tx_buff);
618 free_long_term_buff(adapter, &tx_pool->long_term_buff);
619 free_long_term_buff(adapter, &tx_pool->tso_ltb);
620 kfree(tx_pool->free_map);
623 kfree(adapter->tx_pool);
624 adapter->tx_pool = NULL;
625 adapter->num_active_tx_pools = 0;
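/**
 * init_tx_pools - allocate tx pools, one per tx sub-CRQ
 * @netdev - net device
 *
 * Each pool gets an array of tx buffer descriptors, a free map, a
 * long term mapped buffer for ordinary frames, and a separate long
 * term buffer (tso_ltb) for TSO frames.
 */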
628 static int init_tx_pools(struct net_device *netdev)
630 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
631 struct device *dev = &adapter->vdev->dev;
632 struct ibmvnic_tx_pool *tx_pool;
636 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
637 adapter->tx_pool = kcalloc(tx_subcrqs,
638 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
639 if (!adapter->tx_pool)
642 adapter->num_active_tx_pools = tx_subcrqs;
644 for (i = 0; i < tx_subcrqs; i++) {
645 tx_pool = &adapter->tx_pool[i];
647 netdev_dbg(adapter->netdev,
648 "Initializing tx_pool[%d], %lld buffs\n",
649 i, adapter->req_tx_entries_per_subcrq);
651 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
652 sizeof(struct ibmvnic_tx_buff),
654 if (!tx_pool->tx_buff) {
655 dev_err(dev, "tx pool buffer allocation failed\n");
656 release_tx_pools(adapter);
660 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
661 adapter->req_tx_entries_per_subcrq *
662 (adapter->req_mtu + VLAN_HLEN))) {
663 release_tx_pools(adapter);
668 if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
670 IBMVNIC_TSO_BUF_SZ)) {
671 release_tx_pools(adapter);
675 tx_pool->tso_index = 0;
677 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
678 sizeof(int), GFP_KERNEL);
679 if (!tx_pool->free_map) {
680 release_tx_pools(adapter);
684 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
685 tx_pool->free_map[j] = j;
687 tx_pool->consumer_index = 0;
688 tx_pool->producer_index = 0;
694 static void release_error_buffers(struct ibmvnic_adapter *adapter)
696 struct device *dev = &adapter->vdev->dev;
697 struct ibmvnic_error_buff *error_buff, *tmp;
700 spin_lock_irqsave(&adapter->error_list_lock, flags);
701 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
702 list_del(&error_buff->list);
703 dma_unmap_single(dev, error_buff->dma, error_buff->len,
705 kfree(error_buff->buff);
708 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
711 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
715 if (adapter->napi_enabled)
718 for (i = 0; i < adapter->req_rx_queues; i++)
719 napi_enable(&adapter->napi[i]);
721 adapter->napi_enabled = true;
724 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
728 if (!adapter->napi_enabled)
731 for (i = 0; i < adapter->req_rx_queues; i++) {
732 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
733 napi_disable(&adapter->napi[i]);
736 adapter->napi_enabled = false;
739 static int init_napi(struct ibmvnic_adapter *adapter)
743 adapter->napi = kcalloc(adapter->req_rx_queues,
744 sizeof(struct napi_struct), GFP_KERNEL);
748 for (i = 0; i < adapter->req_rx_queues; i++) {
749 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
750 netif_napi_add(adapter->netdev, &adapter->napi[i],
751 ibmvnic_poll, NAPI_POLL_WEIGHT);
754 adapter->num_active_rx_napi = adapter->req_rx_queues;
758 static void release_napi(struct ibmvnic_adapter *adapter)
765 for (i = 0; i < adapter->num_active_rx_napi; i++) {
766 if (&adapter->napi[i]) {
767 netdev_dbg(adapter->netdev,
768 "Releasing napi[%d]\n", i);
769 netif_napi_del(&adapter->napi[i]);
773 kfree(adapter->napi);
774 adapter->napi = NULL;
775 adapter->num_active_rx_napi = 0;
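/**
 * ibmvnic_login - log the driver in to the VNIC server
 * @netdev - net device
 *
 * Sends a login request and waits for the response. If the server
 * asks for renegotiation, releases the sub-CRQs, re-runs the
 * capability exchange, and retries. Applies any pending MAC address
 * change once login succeeds.
 */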
778 static int ibmvnic_login(struct net_device *netdev)
780 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
781 unsigned long timeout = msecs_to_jiffies(30000);
782 struct device *dev = &adapter->vdev->dev;
786 if (adapter->renegotiate) {
787 adapter->renegotiate = false;
788 release_sub_crqs(adapter, 1);
790 reinit_completion(&adapter->init_done);
791 send_cap_queries(adapter);
792 if (!wait_for_completion_timeout(&adapter->init_done,
794 dev_err(dev, "Capabilities query timeout\n");
797 rc = init_sub_crqs(adapter);
800 "Initialization of SCRQ's failed\n");
803 rc = init_sub_crq_irqs(adapter);
806 "Initialization of SCRQ's irqs failed\n");
811 reinit_completion(&adapter->init_done);
812 rc = send_login(adapter);
814 dev_err(dev, "Unable to attempt device login\n");
816 } else if (!wait_for_completion_timeout(&adapter->init_done,
818 dev_err(dev, "Login timeout\n");
821 } while (adapter->renegotiate);
823 /* handle pending MAC address changes after successful login */
824 if (adapter->mac_change_pending) {
825 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
826 adapter->mac_change_pending = false;
832 static void release_login_buffer(struct ibmvnic_adapter *adapter)
834 kfree(adapter->login_buf);
835 adapter->login_buf = NULL;
838 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
840 kfree(adapter->login_rsp_buf);
841 adapter->login_rsp_buf = NULL;
844 static void release_resources(struct ibmvnic_adapter *adapter)
846 release_vpd_data(adapter);
848 release_tx_pools(adapter);
849 release_rx_pools(adapter);
851 release_error_buffers(adapter);
852 release_napi(adapter);
853 release_login_rsp_buffer(adapter);
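/**
 * set_link_state - request a logical link state change
 * @adapter - ibmvnic adapter
 * @link_state - IBMVNIC_LOGICAL_LNK_UP or IBMVNIC_LOGICAL_LNK_DN
 *
 * Sends a LOGICAL_LINK_STATE CRQ and waits on init_done. A partial
 * success (init_done_rc == 1) causes the request to be re-sent after
 * a delay.
 */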
856 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
858 struct net_device *netdev = adapter->netdev;
859 unsigned long timeout = msecs_to_jiffies(30000);
860 union ibmvnic_crq crq;
864 netdev_dbg(netdev, "setting link state %d\n", link_state);
866 memset(&crq, 0, sizeof(crq));
867 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
868 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
869 crq.logical_link_state.link_state = link_state;
874 reinit_completion(&adapter->init_done);
875 rc = ibmvnic_send_crq(adapter, &crq);
877 netdev_err(netdev, "Failed to set link state\n");
881 if (!wait_for_completion_timeout(&adapter->init_done,
883 netdev_err(netdev, "timeout setting link state\n");
887 if (adapter->init_done_rc == 1) {
888 /* Partial success, delay and re-send */
897 static int set_real_num_queues(struct net_device *netdev)
899 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
902 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
903 adapter->req_tx_queues, adapter->req_rx_queues);
905 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
907 netdev_err(netdev, "failed to set the number of tx queues\n");
911 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
913 netdev_err(netdev, "failed to set the number of rx queues\n");
918 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
920 struct device *dev = &adapter->vdev->dev;
921 union ibmvnic_crq crq;
924 if (adapter->vpd->buff)
925 len = adapter->vpd->len;
927 init_completion(&adapter->fw_done);
928 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
929 crq.get_vpd_size.cmd = GET_VPD_SIZE;
930 ibmvnic_send_crq(adapter, &crq);
931 wait_for_completion(&adapter->fw_done);
933 if (!adapter->vpd->len)
936 if (!adapter->vpd->buff)
937 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
938 else if (adapter->vpd->len != len)
adapter->vpd->buff =
940 krealloc(adapter->vpd->buff,
941 adapter->vpd->len, GFP_KERNEL);
943 if (!adapter->vpd->buff) {
944 dev_err(dev, "Could allocate VPD buffer\n");
948 adapter->vpd->dma_addr =
949 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
951 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
952 dev_err(dev, "Could not map VPD buffer\n");
953 kfree(adapter->vpd->buff);
954 adapter->vpd->buff = NULL;
958 reinit_completion(&adapter->fw_done);
959 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
960 crq.get_vpd.cmd = GET_VPD;
961 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
962 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
963 ibmvnic_send_crq(adapter, &crq);
964 wait_for_completion(&adapter->fw_done);
969 static int init_resources(struct ibmvnic_adapter *adapter)
971 struct net_device *netdev = adapter->netdev;
974 rc = set_real_num_queues(netdev);
978 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
982 /* Vital Product Data (VPD) */
983 rc = ibmvnic_get_vpd(adapter);
985 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
991 rc = init_napi(adapter);
995 send_map_query(adapter);
997 rc = init_rx_pools(netdev);
1001 rc = init_tx_pools(netdev);
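/**
 * __ibmvnic_open - bring the interface up
 * @netdev - net device
 *
 * Replenishes the rx pools, enables NAPI and the rx/tx sub-CRQ
 * interrupts, asks the server for a logical link up, and starts the
 * tx queues.
 */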
1005 static int __ibmvnic_open(struct net_device *netdev)
1007 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1008 enum vnic_state prev_state = adapter->state;
1011 adapter->state = VNIC_OPENING;
1012 replenish_pools(adapter);
1013 ibmvnic_napi_enable(adapter);
1015 /* We're ready to receive frames, enable the sub-crq interrupts and
1016 * set the logical link state to up
1018 for (i = 0; i < adapter->req_rx_queues; i++) {
1019 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1020 if (prev_state == VNIC_CLOSED)
1021 enable_irq(adapter->rx_scrq[i]->irq);
1023 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1026 for (i = 0; i < adapter->req_tx_queues; i++) {
1027 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1028 if (prev_state == VNIC_CLOSED)
1029 enable_irq(adapter->tx_scrq[i]->irq);
1031 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1034 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1036 for (i = 0; i < adapter->req_rx_queues; i++)
1037 napi_disable(&adapter->napi[i]);
1038 release_resources(adapter);
1042 netif_tx_start_all_queues(netdev);
1044 if (prev_state == VNIC_CLOSED) {
1045 for (i = 0; i < adapter->req_rx_queues; i++)
1046 napi_schedule(&adapter->napi[i]);
1049 adapter->state = VNIC_OPEN;
1053 static int ibmvnic_open(struct net_device *netdev)
1055 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1058 mutex_lock(&adapter->reset_lock);
1060 if (adapter->state != VNIC_CLOSED) {
1061 rc = ibmvnic_login(netdev);
1063 mutex_unlock(&adapter->reset_lock);
1067 rc = init_resources(adapter);
1069 netdev_err(netdev, "failed to initialize resources\n");
1070 release_resources(adapter);
1071 mutex_unlock(&adapter->reset_lock);
1076 rc = __ibmvnic_open(netdev);
1077 netif_carrier_on(netdev);
1079 mutex_unlock(&adapter->reset_lock);
1084 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1086 struct ibmvnic_rx_pool *rx_pool;
1087 struct ibmvnic_rx_buff *rx_buff;
1092 if (!adapter->rx_pool)
1095 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
1096 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1098 /* Free any remaining skbs in the rx buffer pools */
1099 for (i = 0; i < rx_scrqs; i++) {
1100 rx_pool = &adapter->rx_pool[i];
1101 if (!rx_pool || !rx_pool->rx_buff)
1104 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1105 for (j = 0; j < rx_entries; j++) {
1106 rx_buff = &rx_pool->rx_buff[j];
1107 if (rx_buff && rx_buff->skb) {
1108 dev_kfree_skb_any(rx_buff->skb);
1109 rx_buff->skb = NULL;
1115 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1117 struct ibmvnic_tx_pool *tx_pool;
1118 struct ibmvnic_tx_buff *tx_buff;
1123 if (!adapter->tx_pool)
1126 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
1127 tx_entries = adapter->req_tx_entries_per_subcrq;
1129 /* Free any remaining skbs in the tx buffer pools */
1130 for (i = 0; i < tx_scrqs; i++) {
1131 tx_pool = &adapter->tx_pool[i];
1132 if (!tx_pool || !tx_pool->tx_buff)
1135 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1136 for (j = 0; j < tx_entries; j++) {
1137 tx_buff = &tx_pool->tx_buff[j];
1138 if (tx_buff && tx_buff->skb) {
1139 dev_kfree_skb_any(tx_buff->skb);
1140 tx_buff->skb = NULL;
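/**
 * ibmvnic_cleanup - quiesce the device
 * @netdev - net device
 *
 * Stops the tx queues, disables NAPI and the sub-CRQ interrupts, and
 * frees any skbs still held in the rx and tx pools.
 */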
1146 static void ibmvnic_cleanup(struct net_device *netdev)
1148 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1151 /* ensure that transmissions are stopped if called by do_reset */
1152 if (adapter->resetting)
1153 netif_tx_disable(netdev);
1155 netif_tx_stop_all_queues(netdev);
1157 ibmvnic_napi_disable(adapter);
1159 if (adapter->tx_scrq) {
1160 for (i = 0; i < adapter->req_tx_queues; i++)
1161 if (adapter->tx_scrq[i]->irq) {
1163 "Disabling tx_scrq[%d] irq\n", i);
1164 disable_irq(adapter->tx_scrq[i]->irq);
1168 if (adapter->rx_scrq) {
1169 for (i = 0; i < adapter->req_rx_queues; i++) {
1170 if (adapter->rx_scrq[i]->irq) {
1172 "Disabling rx_scrq[%d] irq\n", i);
1173 disable_irq(adapter->rx_scrq[i]->irq);
1177 clean_rx_pools(adapter);
1178 clean_tx_pools(adapter);
1181 static int __ibmvnic_close(struct net_device *netdev)
1183 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1186 adapter->state = VNIC_CLOSING;
1187 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1190 ibmvnic_cleanup(netdev);
1191 adapter->state = VNIC_CLOSED;
1195 static int ibmvnic_close(struct net_device *netdev)
1197 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1200 mutex_lock(&adapter->reset_lock);
1201 rc = __ibmvnic_close(netdev);
1202 mutex_unlock(&adapter->reset_lock);
1208 * build_hdr_data - creates L2/L3/L4 header data buffer
1209 * @hdr_field - bitfield determining needed headers
1210 * @skb - socket buffer
1211 * @hdr_len - array of header lengths
1212 * @tot_len - total length of data
1214 * Reads hdr_field to determine which headers are needed by firmware.
1215 * Builds a buffer containing these headers. Saves individual header
1216 * lengths and total buffer length to be used to build descriptors.
1218 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1219 int *hdr_len, u8 *hdr_data)
1224 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1225 hdr_len[0] = sizeof(struct vlan_ethhdr);
1227 hdr_len[0] = sizeof(struct ethhdr);
1229 if (skb->protocol == htons(ETH_P_IP)) {
1230 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1231 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1232 hdr_len[2] = tcp_hdrlen(skb);
1233 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1234 hdr_len[2] = sizeof(struct udphdr);
1235 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1236 hdr_len[1] = sizeof(struct ipv6hdr);
1237 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1238 hdr_len[2] = tcp_hdrlen(skb);
1239 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1240 hdr_len[2] = sizeof(struct udphdr);
1241 } else if (skb->protocol == htons(ETH_P_ARP)) {
1242 hdr_len[1] = arp_hdr_len(skb->dev);
1246 memset(hdr_data, 0, 120);
1247 if ((hdr_field >> 6) & 1) {
1248 hdr = skb_mac_header(skb);
1249 memcpy(hdr_data, hdr, hdr_len[0]);
1253 if ((hdr_field >> 5) & 1) {
1254 hdr = skb_network_header(skb);
1255 memcpy(hdr_data + len, hdr, hdr_len[1]);
1259 if ((hdr_field >> 4) & 1) {
1260 hdr = skb_transport_header(skb);
1261 memcpy(hdr_data + len, hdr, hdr_len[2]);
1268 * create_hdr_descs - create header and header extension descriptors
1269 * @hdr_field - bitfield determining needed headers
1270 * @data - buffer containing header data
1271 * @len - length of data buffer
1272 * @hdr_len - array of individual header lengths
1273 * @scrq_arr - descriptor array
1275 * Creates header and, if needed, header extension descriptors and
1276 * places them in a descriptor array, scrq_arr
1279 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1280 union sub_crq *scrq_arr)
1282 union sub_crq hdr_desc;
1288 while (tmp_len > 0) {
1289 cur = hdr_data + len - tmp_len;
1291 memset(&hdr_desc, 0, sizeof(hdr_desc));
1292 if (cur != hdr_data) {
1293 data = hdr_desc.hdr_ext.data;
1294 tmp = tmp_len > 29 ? 29 : tmp_len;
1295 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1296 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1297 hdr_desc.hdr_ext.len = tmp;
1299 data = hdr_desc.hdr.data;
1300 tmp = tmp_len > 24 ? 24 : tmp_len;
1301 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1302 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1303 hdr_desc.hdr.len = tmp;
1304 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1305 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1306 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1307 hdr_desc.hdr.flag = hdr_field << 1;
1309 memcpy(data, cur, tmp);
1311 *scrq_arr = hdr_desc;
1320 * build_hdr_descs_arr - build a header descriptor array
1321 * @skb - socket buffer
1322 * @num_entries - number of descriptors to be sent
1323 * @subcrq - first TX descriptor
1324 * @hdr_field - bit field determining which headers will be sent
1326 * This function will build a TX descriptor array with applicable
1327 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1330 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1331 int *num_entries, u8 hdr_field)
1333 int hdr_len[3] = {0, 0, 0};
1335 u8 *hdr_data = txbuff->hdr_data;
1337 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1339 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1340 txbuff->indir_arr + 1);
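/**
 * ibmvnic_xmit - transmit entry point
 * @skb - socket buffer to transmit
 * @netdev - net device
 *
 * Copies the frame into the queue's long term mapped buffer (the TSO
 * buffer for GSO frames), builds a v1 tx descriptor, and posts it
 * with send_subcrq, or with send_subcrq_indirect when firmware also
 * wants L2/L3/L4 header descriptors.
 */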
1343 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1345 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1346 int queue_num = skb_get_queue_mapping(skb);
1347 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1348 struct device *dev = &adapter->vdev->dev;
1349 struct ibmvnic_tx_buff *tx_buff = NULL;
1350 struct ibmvnic_sub_crq_queue *tx_scrq;
1351 struct ibmvnic_tx_pool *tx_pool;
1352 unsigned int tx_send_failed = 0;
1353 unsigned int tx_map_failed = 0;
1354 unsigned int tx_dropped = 0;
1355 unsigned int tx_packets = 0;
1356 unsigned int tx_bytes = 0;
1357 dma_addr_t data_dma_addr;
1358 struct netdev_queue *txq;
1359 unsigned long lpar_rc;
1360 union sub_crq tx_crq;
1361 unsigned int offset;
1362 int num_entries = 1;
1369 if (adapter->resetting) {
1370 if (!netif_subqueue_stopped(netdev, skb))
1371 netif_stop_subqueue(netdev, queue_num);
1372 dev_kfree_skb_any(skb);
1380 tx_pool = &adapter->tx_pool[queue_num];
1381 tx_scrq = adapter->tx_scrq[queue_num];
1382 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1383 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1384 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1386 index = tx_pool->free_map[tx_pool->consumer_index];
1388 if (skb_is_gso(skb)) {
1389 offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
1390 dst = tx_pool->tso_ltb.buff + offset;
1391 memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
1392 data_dma_addr = tx_pool->tso_ltb.addr + offset;
1393 tx_pool->tso_index++;
1394 if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
1395 tx_pool->tso_index = 0;
1397 offset = index * (adapter->req_mtu + VLAN_HLEN);
1398 dst = tx_pool->long_term_buff.buff + offset;
1399 memset(dst, 0, adapter->req_mtu + VLAN_HLEN);
1400 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1403 if (skb_shinfo(skb)->nr_frags) {
1407 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1408 cur = skb_headlen(skb);
1410 /* Copy the frags */
1411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1412 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1415 page_address(skb_frag_page(frag)) +
1416 frag->page_offset, skb_frag_size(frag));
1417 cur += skb_frag_size(frag);
1420 skb_copy_from_linear_data(skb, dst, skb->len);
1423 tx_pool->consumer_index =
1424 (tx_pool->consumer_index + 1) %
1425 adapter->req_tx_entries_per_subcrq;
1427 tx_buff = &tx_pool->tx_buff[index];
1429 tx_buff->data_dma[0] = data_dma_addr;
1430 tx_buff->data_len[0] = skb->len;
1431 tx_buff->index = index;
1432 tx_buff->pool_index = queue_num;
1433 tx_buff->last_frag = true;
1435 memset(&tx_crq, 0, sizeof(tx_crq));
1436 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1437 tx_crq.v1.type = IBMVNIC_TX_DESC;
1438 tx_crq.v1.n_crq_elem = 1;
1439 tx_crq.v1.n_sge = 1;
1440 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1441 tx_crq.v1.correlator = cpu_to_be32(index);
1442 if (skb_is_gso(skb))
1443 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
1445 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1446 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1447 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1449 if (adapter->vlan_header_insertion) {
1450 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1451 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1454 if (skb->protocol == htons(ETH_P_IP)) {
1455 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1456 proto = ip_hdr(skb)->protocol;
1457 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1458 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1459 proto = ipv6_hdr(skb)->nexthdr;
1462 if (proto == IPPROTO_TCP)
1463 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1464 else if (proto == IPPROTO_UDP)
1465 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1467 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1468 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1471 if (skb_is_gso(skb)) {
1472 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1473 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1476 /* determine if l2/3/4 headers are sent to firmware */
1477 if ((*hdrs >> 7) & 1) {
1478 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1479 tx_crq.v1.n_crq_elem = num_entries;
1480 tx_buff->num_entries = num_entries;
1481 tx_buff->indir_arr[0] = tx_crq;
1482 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1483 sizeof(tx_buff->indir_arr),
1485 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1486 dev_kfree_skb_any(skb);
1487 tx_buff->skb = NULL;
1488 if (!firmware_has_feature(FW_FEATURE_CMO))
1489 dev_err(dev, "tx: unable to map descriptor array\n");
1495 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1496 (u64)tx_buff->indir_dma,
1499 tx_buff->num_entries = num_entries;
1500 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1503 if (lpar_rc != H_SUCCESS) {
1504 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1506 if (tx_pool->consumer_index == 0)
1507 tx_pool->consumer_index =
1508 adapter->req_tx_entries_per_subcrq - 1;
1510 tx_pool->consumer_index--;
1512 dev_kfree_skb_any(skb);
1513 tx_buff->skb = NULL;
1515 if (lpar_rc == H_CLOSED) {
1516 /* Disable TX and report carrier off if queue is closed.
1517 * Firmware guarantees that a signal will be sent to the
1518 * driver, triggering a reset or some other action.
1520 netif_tx_stop_all_queues(netdev);
1521 netif_carrier_off(netdev);
1530 if (atomic_add_return(num_entries, &tx_scrq->used)
1531 >= adapter->req_tx_entries_per_subcrq) {
1532 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1533 netif_stop_subqueue(netdev, queue_num);
1537 tx_bytes += skb->len;
1538 txq->trans_start = jiffies;
1542 netdev->stats.tx_dropped += tx_dropped;
1543 netdev->stats.tx_bytes += tx_bytes;
1544 netdev->stats.tx_packets += tx_packets;
1545 adapter->tx_send_failed += tx_send_failed;
1546 adapter->tx_map_failed += tx_map_failed;
1547 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1548 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1549 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1554 static void ibmvnic_set_multi(struct net_device *netdev)
1556 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1557 struct netdev_hw_addr *ha;
1558 union ibmvnic_crq crq;
1560 memset(&crq, 0, sizeof(crq));
1561 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1562 crq.request_capability.cmd = REQUEST_CAPABILITY;
1564 if (netdev->flags & IFF_PROMISC) {
1565 if (!adapter->promisc_supported)
1568 if (netdev->flags & IFF_ALLMULTI) {
1569 /* Accept all multicast */
1570 memset(&crq, 0, sizeof(crq));
1571 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1572 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1573 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1574 ibmvnic_send_crq(adapter, &crq);
1575 } else if (netdev_mc_empty(netdev)) {
1576 /* Reject all multicast */
1577 memset(&crq, 0, sizeof(crq));
1578 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1579 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1580 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1581 ibmvnic_send_crq(adapter, &crq);
1583 /* Accept one or more multicast(s) */
1584 netdev_for_each_mc_addr(ha, netdev) {
1585 memset(&crq, 0, sizeof(crq));
1586 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1587 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1588 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1589 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1591 ibmvnic_send_crq(adapter, &crq);
1597 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
1599 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1600 struct sockaddr *addr = p;
1601 union ibmvnic_crq crq;
1603 if (!is_valid_ether_addr(addr->sa_data))
1604 return -EADDRNOTAVAIL;
1606 memset(&crq, 0, sizeof(crq));
1607 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1608 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1609 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1611 init_completion(&adapter->fw_done);
1612 ibmvnic_send_crq(adapter, &crq);
1613 wait_for_completion(&adapter->fw_done);
1614 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1615 return adapter->fw_done_rc ? -EIO : 0;
1618 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1620 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1621 struct sockaddr *addr = p;
1624 if (adapter->state == VNIC_PROBED) {
1625 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1626 adapter->mac_change_pending = true;
1630 rc = __ibmvnic_set_mac(netdev, addr);
1636 * do_reset returns zero if we are able to keep processing reset events, or
1637 * non-zero if we hit a fatal error and must halt.
1639 static int do_reset(struct ibmvnic_adapter *adapter,
1640 struct ibmvnic_rwi *rwi, u32 reset_state)
1642 u64 old_num_rx_queues, old_num_tx_queues;
1643 struct net_device *netdev = adapter->netdev;
1646 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1649 netif_carrier_off(netdev);
1650 adapter->reset_reason = rwi->reset_reason;
1652 old_num_rx_queues = adapter->req_rx_queues;
1653 old_num_tx_queues = adapter->req_tx_queues;
1655 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1656 rc = ibmvnic_reenable_crq_queue(adapter);
1659 ibmvnic_cleanup(netdev);
1660 } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
1661 ibmvnic_cleanup(netdev);
1663 rc = __ibmvnic_close(netdev);
1668 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1669 adapter->wait_for_reset) {
1670 release_resources(adapter);
1671 release_sub_crqs(adapter, 1);
1672 release_crq_queue(adapter);
1675 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1676 /* remove the closed state so when we call open it appears
1677 * we are coming from the probed state.
1679 adapter->state = VNIC_PROBED;
1681 rc = ibmvnic_init(adapter);
1683 return IBMVNIC_INIT_FAILED;
1685 /* If the adapter was in PROBE state prior to the reset,
1688 if (reset_state == VNIC_PROBED)
1691 rc = ibmvnic_login(netdev);
1693 adapter->state = VNIC_PROBED;
1697 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1698 adapter->wait_for_reset) {
1699 rc = init_resources(adapter);
1702 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1703 adapter->req_tx_queues != old_num_tx_queues) {
1704 adapter->map_id = 1;
1705 release_rx_pools(adapter);
1706 release_tx_pools(adapter);
1707 init_rx_pools(netdev);
1708 init_tx_pools(netdev);
1710 release_napi(adapter);
1713 rc = reset_tx_pools(adapter);
1717 rc = reset_rx_pools(adapter);
1721 if (reset_state == VNIC_CLOSED)
1726 rc = __ibmvnic_open(netdev);
1728 if (list_empty(&adapter->rwi_list))
1729 adapter->state = VNIC_CLOSED;
1731 adapter->state = reset_state;
1737 for (i = 0; i < adapter->req_rx_queues; i++)
1738 napi_schedule(&adapter->napi[i]);
1740 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1741 netdev_notify_peers(netdev);
1743 netif_carrier_on(netdev);
1748 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1750 struct ibmvnic_rwi *rwi;
1752 mutex_lock(&adapter->rwi_lock);
1754 if (!list_empty(&adapter->rwi_list)) {
1755 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1757 list_del(&rwi->list);
1762 mutex_unlock(&adapter->rwi_lock);
1766 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1768 struct ibmvnic_rwi *rwi;
1770 rwi = get_next_rwi(adapter);
1773 rwi = get_next_rwi(adapter);
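/**
 * __ibmvnic_reset - process queued reset work items
 * @work - ibmvnic_reset work struct
 *
 * Drains the rwi list, running do_reset for each entry. On a fatal
 * error the remaining work items are freed and the reset is reported
 * as failed.
 */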
1777 static void __ibmvnic_reset(struct work_struct *work)
1779 struct ibmvnic_rwi *rwi;
1780 struct ibmvnic_adapter *adapter;
1781 struct net_device *netdev;
1785 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1786 netdev = adapter->netdev;
1788 mutex_lock(&adapter->reset_lock);
1789 adapter->resetting = true;
1790 reset_state = adapter->state;
1792 rwi = get_next_rwi(adapter);
1794 rc = do_reset(adapter, rwi, reset_state);
1796 if (rc && rc != IBMVNIC_INIT_FAILED)
1799 rwi = get_next_rwi(adapter);
1802 if (adapter->wait_for_reset) {
1803 adapter->wait_for_reset = false;
1804 adapter->reset_done_rc = rc;
1805 complete(&adapter->reset_done);
1809 netdev_dbg(adapter->netdev, "Reset failed\n");
1810 free_all_rwi(adapter);
1811 mutex_unlock(&adapter->reset_lock);
1815 adapter->resetting = false;
1816 mutex_unlock(&adapter->reset_lock);
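/**
 * ibmvnic_reset - queue a reset work item
 * @adapter - ibmvnic adapter
 * @reason - reason for the reset
 *
 * Appends an rwi entry for the given reason, unless a matching reset
 * is already queued, and schedules __ibmvnic_reset.
 */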
1819 static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1820 enum ibmvnic_reset_reason reason)
1822 struct ibmvnic_rwi *rwi, *tmp;
1823 struct net_device *netdev = adapter->netdev;
1824 struct list_head *entry;
1826 if (adapter->state == VNIC_REMOVING ||
1827 adapter->state == VNIC_REMOVED) {
1828 netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1832 if (adapter->state == VNIC_PROBING) {
1833 netdev_warn(netdev, "Adapter reset during probe\n");
1834 adapter->init_done_rc = EAGAIN;
1838 mutex_lock(&adapter->rwi_lock);
1840 list_for_each(entry, &adapter->rwi_list) {
1841 tmp = list_entry(entry, struct ibmvnic_rwi, list);
1842 if (tmp->reset_reason == reason) {
1843 netdev_dbg(netdev, "Skipping matching reset\n");
1844 mutex_unlock(&adapter->rwi_lock);
1849 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1851 mutex_unlock(&adapter->rwi_lock);
1852 ibmvnic_close(netdev);
1856 rwi->reset_reason = reason;
1857 list_add_tail(&rwi->list, &adapter->rwi_list);
1858 mutex_unlock(&adapter->rwi_lock);
1860 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
1861 schedule_work(&adapter->ibmvnic_reset);
1864 static void ibmvnic_tx_timeout(struct net_device *dev)
1866 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1868 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
1871 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1872 struct ibmvnic_rx_buff *rx_buff)
1874 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1876 rx_buff->skb = NULL;
1878 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1879 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1881 atomic_dec(&pool->available);
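/**
 * ibmvnic_poll - NAPI receive handler
 * @napi - napi struct for this rx queue
 * @budget - maximum number of frames to process
 *
 * Pulls completions off the rx sub-CRQ, copies frame data from the
 * long term mapped buffer into the skb, returns the buffer slot to
 * the pool, and replenishes the pool unless the adapter is closing.
 */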
1884 static int ibmvnic_poll(struct napi_struct *napi, int budget)
1886 struct net_device *netdev = napi->dev;
1887 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1888 int scrq_num = (int)(napi - adapter->napi);
1889 int frames_processed = 0;
1892 while (frames_processed < budget) {
1893 struct sk_buff *skb;
1894 struct ibmvnic_rx_buff *rx_buff;
1895 union sub_crq *next;
1900 if (unlikely(adapter->resetting &&
1901 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
1902 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1903 napi_complete_done(napi, frames_processed);
1904 return frames_processed;
1907 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1909 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
rx_buff =
1911 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1912 rx_comp.correlator);
1913 /* do error checking */
1914 if (next->rx_comp.rc) {
1915 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
1916 be16_to_cpu(next->rx_comp.rc));
1917 /* free the entry */
1918 next->rx_comp.first = 0;
1919 dev_kfree_skb_any(rx_buff->skb);
1920 remove_buff_from_pool(adapter, rx_buff);
1922 } else if (!rx_buff->skb) {
1923 /* free the entry */
1924 next->rx_comp.first = 0;
1925 remove_buff_from_pool(adapter, rx_buff);
1929 length = be32_to_cpu(next->rx_comp.len);
1930 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1931 flags = next->rx_comp.flags;
1933 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1936 /* VLAN Header has been stripped by the system firmware and
1937 * needs to be inserted by the driver
1939 if (adapter->rx_vlan_header_insertion &&
1940 (flags & IBMVNIC_VLAN_STRIPPED))
1941 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1942 ntohs(next->rx_comp.vlan_tci));
1944 /* free the entry */
1945 next->rx_comp.first = 0;
1946 remove_buff_from_pool(adapter, rx_buff);
1948 skb_put(skb, length);
1949 skb->protocol = eth_type_trans(skb, netdev);
1950 skb_record_rx_queue(skb, scrq_num);
1952 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1953 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1954 skb->ip_summed = CHECKSUM_UNNECESSARY;
1958 napi_gro_receive(napi, skb); /* send it up */
1959 netdev->stats.rx_packets++;
1960 netdev->stats.rx_bytes += length;
1961 adapter->rx_stats_buffers[scrq_num].packets++;
1962 adapter->rx_stats_buffers[scrq_num].bytes += length;
1966 if (adapter->state != VNIC_CLOSING)
1967 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1969 if (frames_processed < budget) {
1970 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1971 napi_complete_done(napi, frames_processed);
1972 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1973 napi_reschedule(napi)) {
1974 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1978 return frames_processed;
1981 #ifdef CONFIG_NET_POLL_CONTROLLER
1982 static void ibmvnic_netpoll_controller(struct net_device *dev)
1984 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1987 replenish_pools(adapter);
1988 for (i = 0; i < adapter->req_rx_queues; i++)
1989 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1990 adapter->rx_scrq[i]);
#endif
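/**
 * wait_for_reset - request a CHANGE_PARAM reset and wait for it
 * @adapter - ibmvnic adapter
 *
 * Saves the current MTU, queue, and ring settings as a fallback,
 * schedules a VNIC_RESET_CHANGE_PARAM reset, and blocks on
 * reset_done. If that reset fails, the fallback values are restored
 * with a second reset.
 */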
1994 static int wait_for_reset(struct ibmvnic_adapter *adapter)
1996 adapter->fallback.mtu = adapter->req_mtu;
1997 adapter->fallback.rx_queues = adapter->req_rx_queues;
1998 adapter->fallback.tx_queues = adapter->req_tx_queues;
1999 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2000 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2002 init_completion(&adapter->reset_done);
2003 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2004 adapter->wait_for_reset = true;
2005 wait_for_completion(&adapter->reset_done);
2007 if (adapter->reset_done_rc) {
2008 adapter->desired.mtu = adapter->fallback.mtu;
2009 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2010 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2011 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2012 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2014 init_completion(&adapter->reset_done);
2015 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2016 wait_for_completion(&adapter->reset_done);
2018 adapter->wait_for_reset = false;
2020 return adapter->reset_done_rc;
2023 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2025 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2027 adapter->desired.mtu = new_mtu + ETH_HLEN;
2029 return wait_for_reset(adapter);
2032 static const struct net_device_ops ibmvnic_netdev_ops = {
2033 .ndo_open = ibmvnic_open,
2034 .ndo_stop = ibmvnic_close,
2035 .ndo_start_xmit = ibmvnic_xmit,
2036 .ndo_set_rx_mode = ibmvnic_set_multi,
2037 .ndo_set_mac_address = ibmvnic_set_mac,
2038 .ndo_validate_addr = eth_validate_addr,
2039 .ndo_tx_timeout = ibmvnic_tx_timeout,
2040 #ifdef CONFIG_NET_POLL_CONTROLLER
2041 .ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
2043 .ndo_change_mtu = ibmvnic_change_mtu,
2046 /* ethtool functions */
2048 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2049 struct ethtool_link_ksettings *cmd)
2051 u32 supported, advertising;
2053 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2055 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2057 cmd->base.speed = SPEED_1000;
2058 cmd->base.duplex = DUPLEX_FULL;
2059 cmd->base.port = PORT_FIBRE;
2060 cmd->base.phy_address = 0;
2061 cmd->base.autoneg = AUTONEG_ENABLE;
2063 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2065 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2071 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2072 struct ethtool_drvinfo *info)
2074 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2076 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2077 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2078 strlcpy(info->fw_version, adapter->fw_version,
2079 sizeof(info->fw_version));
2082 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2084 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2086 return adapter->msg_enable;
2089 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2091 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2093 adapter->msg_enable = data;
2096 static u32 ibmvnic_get_link(struct net_device *netdev)
2098 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2100 /* Don't need to send a query because we request a logical link up at
2101 * init and then we wait for link state indications
2103 return adapter->logical_link_state;
2106 static void ibmvnic_get_ringparam(struct net_device *netdev,
2107 struct ethtool_ringparam *ring)
2109 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2111 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2112 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2113 ring->rx_mini_max_pending = 0;
2114 ring->rx_jumbo_max_pending = 0;
2115 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2116 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2117 ring->rx_mini_pending = 0;
2118 ring->rx_jumbo_pending = 0;
2121 static int ibmvnic_set_ringparam(struct net_device *netdev,
2122 struct ethtool_ringparam *ring)
2124 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2126 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2127 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2128 netdev_err(netdev, "Invalid request.\n");
2129 netdev_err(netdev, "Max tx buffers = %llu\n",
2130 adapter->max_rx_add_entries_per_subcrq);
2131 netdev_err(netdev, "Max rx buffers = %llu\n",
2132 adapter->max_tx_entries_per_subcrq);
2136 adapter->desired.rx_entries = ring->rx_pending;
2137 adapter->desired.tx_entries = ring->tx_pending;
2139 return wait_for_reset(adapter);
2142 static void ibmvnic_get_channels(struct net_device *netdev,
2143 struct ethtool_channels *channels)
2145 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2147 channels->max_rx = adapter->max_rx_queues;
2148 channels->max_tx = adapter->max_tx_queues;
2149 channels->max_other = 0;
2150 channels->max_combined = 0;
2151 channels->rx_count = adapter->req_rx_queues;
2152 channels->tx_count = adapter->req_tx_queues;
2153 channels->other_count = 0;
2154 channels->combined_count = 0;
2157 static int ibmvnic_set_channels(struct net_device *netdev,
2158 struct ethtool_channels *channels)
2160 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2162 adapter->desired.rx_queues = channels->rx_count;
2163 adapter->desired.tx_queues = channels->tx_count;
2165 return wait_for_reset(adapter);
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}
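
/* Layout sketch (derived from the three callbacks above, assuming
 * NUM_TX_STATS == 3 and NUM_RX_STATS == 3 as the loops imply):
 *
 *	data[0 .. ARRAY_SIZE(ibmvnic_stats) - 1]  global stats from firmware
 *	next 3 * req_tx_queues slots              tx%d_packets/_bytes/_dropped
 *	next 3 * req_rx_queues slots              rx%d_packets/_bytes/_interrupts
 *
 * ibmvnic_get_strings(), ibmvnic_get_sset_count() and
 * ibmvnic_get_ethtool_stats() must walk this layout in the same order or
 * "ethtool -S" output will mislabel counters.
 */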
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
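
/* Sizing illustration (derived from the code above, not new behavior):
 * each sub-CRQ ring is an order-2 allocation of 4 pages, and a sub-CRQ
 * entry (union sub_crq) is 32 bytes (four u64s), so
 *
 *	scrq->size = 4 * PAGE_SIZE / 32
 *
 * i.e. 512 descriptors with 4K pages, or 8192 with the 64K pages common
 * on pseries. The free_pages(..., 2) in the error path and in
 * release_sub_crq_queue() must match that order-2 allocation.
 */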
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
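
/* Note on the restart_loop pattern above: enable_scrq_irq() can race with
 * the server posting another completion. Re-checking pending_scrq() after
 * re-enabling, and looping back with the interrupt masked again, closes
 * the window in which a completion could sit in the ring with the irq
 * enabled but the queue unserviced. The rx path relies on NAPI's
 * schedule/complete handshake for the same guarantee.
 */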
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more ; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
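
		/* Worked example (illustrative): with 4K pages,
		 * entries_page = 4 * 4096 / 32 = 512, so the server's
		 * min_{tx,rx_add}_entries_per_subcrq must fit within one
		 * 4-page ring or the negotiation is abandoned below.
		 */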
		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
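
/* Protocol sketch (summarizing the code above): each REQUEST_CAPABILITY
 * CRQ bumps running_cap_crqs; handle_request_cap_rsp() decrements it as
 * responses arrive and, once it drains to zero, moves on to
 * QUERY_IP_OFFLOAD. On PARTIALSUCCESS the server's counter-offer is
 * adopted and this function is re-entered with retry=1, which skips the
 * recalculation block and just resends the adjusted requests.
 */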
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
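
/* Valid-entry protocol (a sketch of the pattern, as used elsewhere in
 * this file): the producer sets IBMVNIC_CRQ_CMD_RSP in generic.first when
 * an entry is ready, and the consumer clears first after processing (see
 * ibmvnic_complete_tx() zeroing tx_comp.first). A minimal consumer loop
 * therefore looks like:
 *
 *	while (pending_scrq(adapter, scrq)) {
 *		entry = ibmvnic_next_scrq(adapter, scrq);
 *		// ... process entry ...
 *		entry->generic.first = 0;
 *	}
 */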
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (adapter->resetting)
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name;
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}
static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, adapter->netdev->name, len);
}
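
/* Buffer layout illustration (hypothetical values: nodename "lpar1",
 * netdev "eth0"); each entry is a one-byte type, a big-endian length that
 * includes the NUL, then the string, all packed back to back:
 *
 *	01 00 06 'L' 'i' 'n' 'u' 'x' 00
 *	02 00 06 'l' 'p' 'a' 'r' '1' 00
 *	03 00 05 'e' 't' 'h' '0' 00
 *
 * vnic_client_data_len() sizes this buffer with room for a fourth,
 * null terminating entry.
 */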
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
		cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
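
/* Flow note (summarizing the surrounding code): these QUERY_CAPABILITY
 * commands are the first half of the negotiation. Each response lands in
 * handle_query_cap_rsp(), which records the advertised min/max/opt values
 * and, when running_cap_crqs drains to zero, kicks off
 * ibmvnic_send_req_caps(adapter, 0) to request concrete values within
 * those bounds.
 */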
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
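
/* Parse illustration (hypothetical VPD contents): for a buffer containing
 *
 *	... 'R' 'M' 0x05 '1' '.' '2' '3' 0x00 ...
 *
 * substr points at 'R', fw_level_len = 5 is read from substr[2], and the
 * five bytes starting at substr + 3 ("1.23" plus its NUL) become
 * adapter->fw_version, as reported by "ethtool -i".
 */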
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->features |= NETIF_F_TSO6;

	adapter->netdev->hw_features |= adapter->netdev->features;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
out:
	complete(&adapter->fw_done);
	return rc;
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
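
/* Ask firmware to re-enable a CRQ that was disabled, e.g. after a
 * partner partition suspension, retrying while the hypervisor reports
 * the operation busy or in progress.
 */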
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
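
/* Close and immediately re-register the CRQ with the hypervisor,
 * reusing the existing DMA mapping. Used during resets and after
 * kexec, when a stale registration may still exist.
 */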
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
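
/* Full CRQ teardown: detach the interrupt and tasklet, free the queue
 * with the hypervisor, then release the DMA mapping and the backing
 * page. A NULL crq->msgs makes this a no-op, so it is safe to call on
 * an already-released queue.
 */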
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
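
/* Allocate, DMA-map and register a one-page CRQ with the hypervisor,
 * then set up the tasklet and the device interrupt. H_CLOSED from
 * H_REG_CRQ is not fatal here; it only means the partner adapter is
 * not ready yet.
 */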
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
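
/* Establish (or re-establish) communication with the VNIC server:
 * bring up the CRQ, send the init request, and wait up to 30 seconds
 * for the version/capability negotiation driven from
 * ibmvnic_handle_crq() to complete before setting up the sub-CRQs,
 * their interrupts and the statistics buffers.
 */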
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	if (adapter->resetting && !adapter->wait_for_reset) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;
	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_stats_buffers(adapter);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);

	return rc;
}

static struct device_attribute dev_attr_failover;
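
/* Probe an "IBM,vnic" vio device: fetch the MAC address from the
 * device tree, allocate a multi-queue net_device, initialize the
 * adapter locks, lists and reset worker, negotiate with the server
 * via ibmvnic_init() (retrying while it returns EAGAIN), then
 * register the netdev and the failover sysfs attribute.
 */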
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_init_fail;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
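
/* Mirror image of ibmvnic_probe(): unregister the netdev, release the
 * queues, statistics and sysfs attribute, and free the net_device.
 */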
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
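
/* Writing "1" to the failover attribute fetches the current session
 * token from the hypervisor and reports a session error against it,
 * which initiates a failover to the backup device. A usage sketch
 * (the sysfs path depends on the device's unit address, shown here
 * with a made-up example address):
 *
 *	echo 1 > /sys/devices/vio/30000002/failover
 */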
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
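
/* Report the IO (DMA) entitlement this device wants to the vio bus:
 * one page for the CRQ, the statistics buffer, four pages per sub-CRQ
 * and every long-term-mapped rx buffer, each rounded up to IOMMU page
 * granularity. Before probe has populated the netdev, fall back to
 * IBMVNIC_IO_ENTITLEMENT_DEFAULT.
 */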
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
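
/* PM resume hook: if the adapter was open, kick the tasklet so any
 * CRQ messages that arrived while suspended get processed.
 */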
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);