Merge tag 'mlx5-updates-2017-06-11' of git://git.kernel.org/pub/scm/linux/kernel...
author David S. Miller <davem@davemloft.net>
Sun, 11 Jun 2017 22:10:42 +0000 (18:10 -0400)
committer David S. Miller <davem@davemloft.net>
Sun, 11 Jun 2017 22:10:42 +0000 (18:10 -0400)
Saeed Mahameed says:

====================
mlx5-updates-2017-06-11

This series provides updates to the mlx5 header re-write feature, from Or Gerlitz,
plus three more small updates from Maor and Eran.

-------
Or says:

Packets belonging to flows that differ in their match criteria may still need
to go through the same header re-writes (e.g. setting the current routing hop
MACs and decrementing the TTL).  To minimize the number of modify header
IDs, we add a cache for header re-write IDs which is keyed by the binary
chain of modify header actions.

The caching is supported for both eswitch and NIC use-cases, where the
actual conversion of the code to use caching comes in separate patches,
one per use-case.

Using a per-field mask, the TC pedit action supports modifying partial
fields.  The last patch enables offloading that.
-------

From Maor, an update of the flow table commands layout to the latest HW spec.
From Eran, updates to ethtool connector type reporting.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
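
The caching described in the message above can be illustrated with a small,
self-contained sketch.  This is not the mlx5 driver code, and every name in it
(mod_hdr_cache, mod_hdr_entry, the fake ID allocator) is hypothetical; it only
shows the lookup discipline: flows whose modify-header action chains are
byte-identical resolve to the same ID, so the number of modify-header contexts
grows with the number of distinct re-write chains rather than with the number
of flows.

/*
 * Hypothetical sketch of a modify-header ID cache keyed by the binary
 * chain of actions.  Plain C, not the mlx5 implementation.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct mod_hdr_entry {
	struct mod_hdr_entry *next;
	uint64_t key;            /* hash of the action chain */
	uint32_t hw_id;          /* ID the device returned for this chain */
	size_t actions_len;      /* length of the action chain in bytes */
	unsigned char actions[]; /* binary chain of modify-header actions */
};

struct mod_hdr_cache {
	struct mod_hdr_entry *head;
	uint32_t next_fake_id;   /* stand-in for firmware-allocated IDs */
};

/* FNV-1a over the raw action bytes; the cache is keyed on exactly this data. */
static uint64_t hash_actions(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint64_t h = 0xcbf29ce484222325ULL;

	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

/* Return an existing ID for an identical action chain, or allocate a new one. */
uint32_t mod_hdr_get(struct mod_hdr_cache *cache, const void *actions,
		     size_t len)
{
	uint64_t key = hash_actions(actions, len);
	struct mod_hdr_entry *e;

	for (e = cache->head; e; e = e->next)
		if (e->key == key && e->actions_len == len &&
		    !memcmp(e->actions, actions, len))
			return e->hw_id;        /* hit: reuse the existing ID */

	e = malloc(sizeof(*e) + len);
	if (!e)
		return 0;                       /* 0 stands for "no ID" here */
	e->key = key;
	e->actions_len = len;
	memcpy(e->actions, actions, len);
	e->hw_id = ++cache->next_fake_id;       /* real code would ask the HW */
	e->next = cache->head;
	cache->head = e;
	return e->hw_id;
}
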
147 files changed:
Documentation/networking/rxrpc.txt
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/phy.c
drivers/net/dsa/mv88e6xxx/phy.h
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/emac/phy.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/nfp_app.c
drivers/net/ethernet/netronome/nfp/nfp_app.h
drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfp_main.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_roce.h
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/falcon/efx.h
drivers/net/ethernet/sfc/falcon/tx.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/geneve.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/sungem_phy.c
drivers/net/team/team.c
drivers/net/usb/r8152.c
fs/afs/rxrpc.c
include/linux/inetdevice.h
include/linux/netdevice.h
include/linux/qed/qed_ll2_if.h
include/linux/qed/qed_roce_if.h
include/linux/rxrpc.h
include/net/addrconf.h
include/net/af_rxrpc.h
include/net/netns/ipv4.h
include/net/secure_seq.h
include/net/sock.h
include/net/switchdev.h
include/net/tcp.h
include/uapi/linux/bpf.h
include/uapi/linux/mroute.h
include/uapi/linux/neighbour.h
include/uapi/linux/snmp.h
kernel/bpf/verifier.c
kernel/trace/bpf_trace.c
net/8021q/vlan_dev.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_switchdev.c
net/core/filter.c
net/core/net_namespace.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/decnet/af_decnet.c
net/dsa/slave.c
net/dsa/switch.c
net/ipv4/devinet.c
net/ipv4/ipmr.c
net/ipv4/proc.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/ip6_output.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/packet/af_packet.c
net/qrtr/qrtr.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_object.c
net/rxrpc/sendmsg.c
net/sched/cls_bpf.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/cls_u32.c
net/sched/sch_mqprio.c
net/sctp/associola.c
net/sctp/proc.c
net/sctp/socket.c
net/switchdev/switchdev.c
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/test_align.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_obj_id.c
tools/testing/selftests/bpf/test_progs.c

index 18078e630a6325abdd9746a7c385ec8867bcbd15..8c70ba5dee4d0072da0ac8666d987248758afa0f 100644
@@ -327,6 +327,7 @@ calls, to invoke certain actions and to report certain conditions.  These are:
        RXRPC_ACCEPT            s-- n/a         Accept new call
        RXRPC_EXCLUSIVE_CALL    s-- n/a         Make an exclusive client call
        RXRPC_UPGRADE_SERVICE   s-- n/a         Client call can be upgraded
+       RXRPC_TX_LENGTH         s-- data len    Total length of Tx data
 
        (SRT = usable in Sendmsg / delivered by Recvmsg / Terminal message)
 
@@ -406,6 +407,23 @@ calls, to invoke certain actions and to report certain conditions.  These are:
      future communication to that server and RXRPC_UPGRADE_SERVICE should no
      longer be set.
 
+ (*) RXRPC_TX_LENGTH
+
+     This is used to inform the kernel of the total amount of data that is
+     going to be transmitted by a call (whether in a client request or a
+     service response).  If given, it allows the kernel to encrypt from the
+     userspace buffer directly to the packet buffers, rather than copying into
+     the buffer and then encrypting in place.  This may only be given with the
+     first sendmsg() providing data for a call.  EMSGSIZE will be generated if
+     the amount of data actually given is different.
+
+     This takes a parameter of __s64 type that indicates how much will be
+     transmitted.  This may not be less than zero.
+
+The symbol RXRPC__SUPPORTED is defined as one more than the highest control
+message type supported.  At run time this can be queried by means of the
+RXRPC_SUPPORTED_CMSG socket option (see below).
+
 
 ==============
 SOCKET OPTIONS
@@ -459,6 +477,11 @@ AF_RXRPC sockets support a few socket options at the SOL_RXRPC level:
      must point to an array of two unsigned short ints.  The first is the
      service ID to upgrade from and the second the service ID to upgrade to.
 
+ (*) RXRPC_SUPPORTED_CMSG
+
+     This is a read-only option that writes an int into the buffer indicating
+     the highest control message type supported.
+
 
 ========
 SECURITY
@@ -568,6 +591,9 @@ A client would issue an operation by:
      MSG_MORE should be set in msghdr::msg_flags on all but the last part of
      the request.  Multiple requests may be made simultaneously.
 
+     An RXRPC_TX_LENGTH control message can also be specified on the first
+     sendmsg() call.
+
      If a call is intended to go to a destination other than the default
      specified through connect(), then msghdr::msg_name should be set on the
      first request message of that call.
@@ -755,6 +781,7 @@ The kernel interface functions are as follows:
                                struct sockaddr_rxrpc *srx,
                                struct key *key,
                                unsigned long user_call_ID,
+                               s64 tx_total_len,
                                gfp_t gfp);
 
      This allocates the infrastructure to make a new RxRPC call and assigns
@@ -771,6 +798,11 @@ The kernel interface functions are as follows:
      control data buffer.  It is entirely feasible to use this to point to a
      kernel data structure.
 
+     tx_total_len is the amount of data the caller is intending to transmit
+     with this call (or -1 if unknown at this point).  Setting the data size
+     allows the kernel to encrypt directly to the packet buffers, thereby
+     saving a copy.  The value may not be less than -1.
+
      If this function is successful, an opaque reference to the RxRPC call is
      returned.  The caller now holds a reference on this and it must be
      properly ended.
@@ -922,6 +954,17 @@ The kernel interface functions are as follows:
 
      This is used to find the remote peer address of a call.
 
+ (*) Set the total transmit data size on a call.
+
+       void rxrpc_kernel_set_tx_length(struct socket *sock,
+                                       struct rxrpc_call *call,
+                                       s64 tx_total_len);
+
+     This sets the amount of data that the caller is intending to transmit on a
+     call.  It's intended to be used for setting the reply size as the request
+     size should be set when the call is begun.  tx_total_len may not be less
+     than zero.
+
 
 =======================
 CONFIGURABLE PARAMETERS
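
The rxrpc.txt additions above describe the new RXRPC_TX_LENGTH control message.
A rough userspace sketch of passing it on the first sendmsg() of a call
follows; it assumes an already bound and connected AF_RXRPC socket fd and a
caller-chosen user call ID, omits error handling, and assumes the RXRPC_*
cmsg constants are available from the kernel's linux/rxrpc.h header of that
era.  Treat it as an illustration of the cmsg layout, not as tested code.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/types.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272	/* from the kernel's include/linux/socket.h */
#endif

static ssize_t send_first_chunk(int fd, unsigned long user_call_id,
				const void *buf, size_t len, __s64 total_len)
{
	char control[CMSG_SPACE(sizeof(user_call_id)) +
		     CMSG_SPACE(sizeof(total_len))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	/* Identify the call this data belongs to. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	/* Declare the total Tx length up front (first sendmsg() only). */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_TX_LENGTH;
	cmsg->cmsg_len = CMSG_LEN(sizeof(total_len));
	memcpy(CMSG_DATA(cmsg), &total_len, sizeof(total_len));

	/* MSG_MORE if more of the request follows in later sendmsg() calls. */
	return sendmsg(fd, &msg, len < (size_t)total_len ? MSG_MORE : 0);
}
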
index b0ae4f0c8aa7f143bed5967b97c7006ad61db4a1..2f1136bf7b1f6bc9721c9883f54900d9195dedf3 100644
@@ -3756,7 +3756,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
         */
        memset(&tmp_opt, 0, sizeof(tmp_opt));
        tcp_clear_options(&tmp_opt);
-       tcp_parse_options(skb, &tmp_opt, 0, NULL);
+       tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
 
        req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
index 6a72095d6c7acf7b4c62ce6720118f8a128de523..485c1fef238bc45b7e83fa14064ce1f33de78b56 100644
@@ -886,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
        memcpy(&sgid->raw[8], guid, sizeof(guid));
 
        /* Update LL2 */
-       rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
-                                              dev->gsi_ll2_mac_address,
-                                              dev->ndev->dev_addr);
+       rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+                                         dev->gsi_ll2_mac_address,
+                                         dev->ndev->dev_addr);
 
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
index aa08c76a42450a2ef86d1854b45a80d7bcd4387c..80333ec2c8b66d6061e7fc62492c5402395f5074 100644
@@ -150,6 +150,8 @@ struct qedr_dev {
        u32                     dp_module;
        u8                      dp_level;
        u8                      num_hwfns;
+       u8                      gsi_ll2_handle;
+
        uint                    wq_multiplier;
        u8                      gsi_ll2_mac_address[ETH_ALEN];
        int                     gsi_qp_created;
index d86dbe814d98fbe00adf22acb6d1ee658efc5ef5..eb3dce72fc21dac1844016cb75d6a5c66a0feec9 100644
@@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
        dev->gsi_qp = qp;
 }
 
-void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+void qedr_ll2_complete_tx_packet(void *cxt,
+                                u8 connection_handle,
+                                void *cookie,
+                                dma_addr_t first_frag_addr,
+                                bool b_last_fragment, bool b_last_packet)
 {
-       struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+       struct qedr_dev *dev = (struct qedr_dev *)cxt;
+       struct qed_roce_ll2_packet *pkt = cookie;
        struct qedr_cq *cq = dev->gsi_sqcq;
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;
@@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
 }
 
-void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
-                   struct qed_roce_ll2_rx_params *params)
+void qedr_ll2_complete_rx_packet(void *cxt,
+                                struct qed_ll2_comp_rx_data *data)
 {
-       struct qedr_dev *dev = (struct qedr_dev *)_dev;
+       struct qedr_dev *dev = (struct qedr_dev *)cxt;
        struct qedr_cq *cq = dev->gsi_rqcq;
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
-       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
-       qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
-       ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+       qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
+               -EINVAL : 0;
+       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+       /* note: length stands for data length i.e. GRH is excluded */
+       qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
+               data->length.data_length;
+       *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
+               ntohl(data->opaque_data_0);
+       *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
+               ntohs((u16)data->opaque_data_1);
 
        qedr_inc_sw_gsi_cons(&qp->rq);
 
@@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void qedr_ll2_release_rx_packet(void *cxt,
+                               u8 connection_handle,
+                               void *cookie,
+                               dma_addr_t rx_buf_addr, bool b_last_packet)
+{
+       /* Do nothing... */
+}
+
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
                                struct ib_qp_init_attr *attrs)
 {
@@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
        return 0;
 }
 
+static int qedr_ll2_post_tx(struct qedr_dev *dev,
+                           struct qed_roce_ll2_packet *pkt)
+{
+       enum qed_ll2_roce_flavor_type roce_flavor;
+       struct qed_ll2_tx_pkt_info ll2_tx_pkt;
+       int rc;
+       int i;
+
+       memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
+
+       roce_flavor = (pkt->roce_mode == ROCE_V1) ?
+           QED_LL2_ROCE : QED_LL2_RROCE;
+
+       if (pkt->roce_mode == ROCE_V2_IPV4)
+               ll2_tx_pkt.enable_ip_cksum = 1;
+
+       ll2_tx_pkt.num_of_bds = 1 /* hdr */  + pkt->n_seg;
+       ll2_tx_pkt.vlan = 0;
+       ll2_tx_pkt.tx_dest = pkt->tx_dest;
+       ll2_tx_pkt.qed_roce_flavor = roce_flavor;
+       ll2_tx_pkt.first_frag = pkt->header.baddr;
+       ll2_tx_pkt.first_frag_len = pkt->header.len;
+       ll2_tx_pkt.cookie = pkt;
+
+       /* tx header */
+       rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
+                                            dev->gsi_ll2_handle,
+                                            &ll2_tx_pkt, 1);
+       if (rc) {
+               /* TX failed while posting header - release resources */
+               dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+                                 pkt->header.vaddr, pkt->header.baddr);
+               kfree(pkt);
+
+               DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+               return rc;
+       }
+
+       /* tx payload */
+       for (i = 0; i < pkt->n_seg; i++) {
+               rc = dev->ops->ll2_set_fragment_of_tx_packet(
+                       dev->rdma_ctx,
+                       dev->gsi_ll2_handle,
+                       pkt->payload[i].baddr,
+                       pkt->payload[i].len);
+
+               if (rc) {
+                       /* if failed not much to do here, partial packet has
+                        * been posted we can't free memory, will need to wait
+                        * for completion
+                        */
+                       DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+int qedr_ll2_stop(struct qedr_dev *dev)
+{
+       int rc;
+
+       if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
+               return 0;
+
+       /* remove LL2 MAC address filter */
+       rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+                                         dev->gsi_ll2_mac_address, NULL);
+
+       rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
+                                               dev->gsi_ll2_handle);
+       if (rc)
+               DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
+
+       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+       dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
+
+       return rc;
+}
+
+int qedr_ll2_start(struct qedr_dev *dev,
+                  struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+{
+       struct qed_ll2_acquire_data data;
+       struct qed_ll2_cbs cbs;
+       int rc;
+
+       /* configure and start LL2 */
+       cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
+       cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
+       cbs.rx_release_cb = qedr_ll2_release_rx_packet;
+       cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
+       cbs.cookie = dev;
+
+       memset(&data, 0, sizeof(data));
+       data.input.conn_type = QED_LL2_TYPE_ROCE;
+       data.input.mtu = dev->ndev->mtu;
+       data.input.rx_num_desc = attrs->cap.max_recv_wr;
+       data.input.rx_drop_ttl0_flg = true;
+       data.input.rx_vlan_removal_en = false;
+       data.input.tx_num_desc = attrs->cap.max_send_wr;
+       data.input.tx_tc = 0;
+       data.input.tx_dest = QED_LL2_TX_DEST_NW;
+       data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
+       data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
+       data.input.gsi_enable = 1;
+       data.p_connection_handle = &dev->gsi_ll2_handle;
+       data.cbs = &cbs;
+
+       rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
+       if (rc) {
+               DP_ERR(dev,
+                      "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+                      rc);
+               return rc;
+       }
+
+       rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
+                                               dev->gsi_ll2_handle);
+       if (rc) {
+               DP_ERR(dev,
+                      "ll2 start: failed to establish LL2 connection (rc=%d)\n",
+                      rc);
+               goto err1;
+       }
+
+       rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
+       if (rc)
+               goto err2;
+
+       return 0;
+
+err2:
+       dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+err1:
+       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+       return rc;
+}
+
 struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
                                 struct ib_qp_init_attr *attrs,
                                 struct qedr_qp *qp)
 {
-       struct qed_roce_ll2_params ll2_params;
        int rc;
 
        rc = qedr_check_gsi_qp_attrs(dev, attrs);
        if (rc)
                return ERR_PTR(rc);
 
-       /* configure and start LL2 */
-       memset(&ll2_params, 0, sizeof(ll2_params));
-       ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
-       ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
-       ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
-       ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
-       ll2_params.cb_cookie = (void *)dev;
-       ll2_params.mtu = dev->ndev->mtu;
-       ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
-       rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+       rc = qedr_ll2_start(dev, attrs, qp);
        if (rc) {
                DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
                return ERR_PTR(rc);
@@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
 err:
        kfree(qp->rqe_wr_id);
 
-       rc = dev->ops->roce_ll2_stop(dev->cdev);
+       rc = qedr_ll2_stop(dev);
        if (rc)
                DP_ERR(dev, "create gsi qp: failed destroy on create\n");
 
@@ -223,15 +374,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
 
 int qedr_destroy_gsi_qp(struct qedr_dev *dev)
 {
-       int rc;
-
-       rc = dev->ops->roce_ll2_stop(dev->cdev);
-       if (rc)
-               DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
-       else
-               DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
-
-       return rc;
+       return qedr_ll2_stop(dev);
 }
 
 #define QEDR_MAX_UD_HEADER_SIZE        (100)
@@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
        struct qed_roce_ll2_packet *pkt = NULL;
        struct qedr_qp *qp = get_qedr_qp(ibqp);
-       struct qed_roce_ll2_tx_params params;
        struct qedr_dev *dev = qp->dev;
        unsigned long flags;
        int rc;
@@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                goto err;
        }
 
-       memset(&params, 0, sizeof(params));
-
        spin_lock_irqsave(&qp->q_lock, flags);
 
        rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
@@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                goto err;
        }
 
-       rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+       rc = qedr_ll2_post_tx(dev, pkt);
+
        if (!rc) {
                qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
                qedr_inc_sw_prod(&qp->sq);
@@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
                         wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
        } else {
-               if (rc == QED_ROCE_TX_HEAD_FAILURE) {
-                       /* TX failed while posting header - release resources */
-                       dma_free_coherent(&dev->pdev->dev, pkt->header.len,
-                                         pkt->header.vaddr, pkt->header.baddr);
-                       kfree(pkt);
-               } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
-                       /* NTD since TX failed while posting a fragment. We will
-                        * release the resources on TX callback
-                        */
-               }
-
                DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
                rc = -EAGAIN;
                *bad_wr = wr;
@@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
        struct qedr_qp *qp = get_qedr_qp(ibqp);
-       struct qed_roce_ll2_buffer buf;
        unsigned long flags;
-       int status = 0;
-       int rc;
+       int rc = 0;
 
        if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
            (qp->state != QED_ROCE_QP_STATE_RTS)) {
@@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                return -EINVAL;
        }
 
-       memset(&buf, 0, sizeof(buf));
-
        spin_lock_irqsave(&qp->q_lock, flags);
 
        while (wr) {
@@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                        goto err;
                }
 
-               buf.baddr = wr->sg_list[0].addr;
-               buf.len = wr->sg_list[0].length;
-
-               rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+               rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+                                                 dev->gsi_ll2_handle,
+                                                 wr->sg_list[0].addr,
+                                                 wr->sg_list[0].length,
+                                                 0 /* cookie */,
+                                                 1 /* notify_fw */);
                if (rc) {
                        DP_ERR(dev,
                               "gsi post recv: failed to post rx buffer (rc=%d)\n",
@@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       return status;
+       return rc;
 err:
        spin_unlock_irqrestore(&qp->q_lock, flags);
        *bad_wr = wr;
index 6742b0dc082115df347955c77c619e5a47ffb2d6..e240010b93fa0b568517fb696f8053b9f8ae28f8 100644
@@ -364,8 +364,8 @@ isac_mos_irq(struct isac_hw *isac)
                        WriteISAC(isac, ISAC_MOCR, isac->mocr);
                        if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
                                if (isac->monitor)
-                                       ret = isac->monitor(isac->dch.hw,
-                                                           MONITOR_TX_0, NULL, 0);
+                                       isac->monitor(isac->dch.hw,
+                                                     MONITOR_TX_0, NULL, 0);
                        }
                        kfree(isac->mon_tx);
                        isac->mon_tx = NULL;
@@ -375,8 +375,8 @@ isac_mos_irq(struct isac_hw *isac)
                }
                if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
                        if (isac->monitor)
-                               ret = isac->monitor(isac->dch.hw,
-                                                   MONITOR_TX_0, NULL, 0);
+                               isac->monitor(isac->dch.hw,
+                                             MONITOR_TX_0, NULL, 0);
                        kfree(isac->mon_tx);
                        isac->mon_tx = NULL;
                        isac->mon_txc = 0;
@@ -397,8 +397,8 @@ isac_mos_irq(struct isac_hw *isac)
                        WriteISAC(isac, ISAC_MOCR, isac->mocr);
                        if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
                                if (isac->monitor)
-                                       ret = isac->monitor(isac->dch.hw,
-                                                           MONITOR_TX_1, NULL, 0);
+                                       isac->monitor(isac->dch.hw,
+                                                     MONITOR_TX_1, NULL, 0);
                        }
                        kfree(isac->mon_tx);
                        isac->mon_tx = NULL;
@@ -408,8 +408,8 @@ isac_mos_irq(struct isac_hw *isac)
                }
                if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
                        if (isac->monitor)
-                               ret = isac->monitor(isac->dch.hw,
-                                                   MONITOR_TX_1, NULL, 0);
+                               isac->monitor(isac->dch.hw,
+                                             MONITOR_TX_1, NULL, 0);
                        kfree(isac->mon_tx);
                        isac->mon_tx = NULL;
                        isac->mon_txc = 0;
index b44a6aeb346d0404144dde0304a502268de1da91..165a8009c640100c45d5022575bedb0c88af1a04 100644
@@ -322,6 +322,11 @@ static u16 __get_link_speed(struct port *port)
 
                default:
                        /* unknown speed value from ethtool. shouldn't happen */
+                       if (slave->speed != SPEED_UNKNOWN)
+                               pr_warn_once("%s: unknown ethtool speed (%d) for port %d (set it to 0)\n",
+                                            slave->bond->dev->name,
+                                            slave->speed,
+                                            port->actor_port_number);
                        speed = 0;
                        break;
                }
index d4484d1a81644826bb30a98681f32e3b81f69485..7d9474352c36164b8ddb2c77a7f60416d8296f6d 100644
@@ -4175,12 +4175,6 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_add_slave          = bond_enslave,
        .ndo_del_slave          = bond_release,
        .ndo_fix_features       = bond_fix_features,
-       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
-       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
-       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
-       .ndo_fdb_add            = switchdev_port_fdb_add,
-       .ndo_fdb_del            = switchdev_port_fdb_del,
-       .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_features_check     = passthru_features_check,
 };
 
index 44c87027623b696b63566e03e72df4bf047b6fc7..a4cf0366765facf4fb067c146fb19ef14adcaa7c 100644
@@ -489,8 +489,7 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
        err = 0;
 restore_link:
        if (chip->info->ops->port_set_link(chip, port, link))
-               netdev_err(chip->ds->ports[port].netdev,
-                          "failed to restore MAC's link\n");
+               dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
 
        return err;
 }
@@ -514,7 +513,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
        mutex_unlock(&chip->reg_lock);
 
        if (err && err != -EOPNOTSUPP)
-               netdev_err(ds->ports[port].netdev, "failed to configure MAC\n");
+               dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
 }
 
 static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
@@ -916,32 +915,14 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
                                         u8 state)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
-       int stp_state;
        int err;
 
-       switch (state) {
-       case BR_STATE_DISABLED:
-               stp_state = PORT_CONTROL_STATE_DISABLED;
-               break;
-       case BR_STATE_BLOCKING:
-       case BR_STATE_LISTENING:
-               stp_state = PORT_CONTROL_STATE_BLOCKING;
-               break;
-       case BR_STATE_LEARNING:
-               stp_state = PORT_CONTROL_STATE_LEARNING;
-               break;
-       case BR_STATE_FORWARDING:
-       default:
-               stp_state = PORT_CONTROL_STATE_FORWARDING;
-               break;
-       }
-
        mutex_lock(&chip->reg_lock);
-       err = mv88e6xxx_port_set_state(chip, port, stp_state);
+       err = mv88e6xxx_port_set_state(chip, port, state);
        mutex_unlock(&chip->reg_lock);
 
        if (err)
-               netdev_err(ds->ports[port].netdev, "failed to update state\n");
+               dev_err(ds->dev, "p%d: failed to update state\n", port);
 }
 
 static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
@@ -1009,7 +990,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
        mutex_unlock(&chip->reg_lock);
 
        if (err)
-               netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
+               dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
 }
 
 static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1159,11 +1140,10 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
                entry->valid = true;
                entry->vid = vid;
 
-               /* Include only CPU and DSA ports */
+               /* Exclude all ports */
                for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
-                       entry->member[i] = dsa_is_normal_port(chip->ds, i) ?
-                               GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER :
-                               GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+                       entry->member[i] =
+                               GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
                return mv88e6xxx_atu_new(chip, &entry->fid);
        }
@@ -1215,10 +1195,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
                        if (!ds->ports[i].bridge_dev)
                                continue;
 
-                       netdev_warn(ds->ports[port].netdev,
-                                   "hardware VLAN %d already used by %s\n",
-                                   vlan.vid,
-                                   netdev_name(ds->ports[i].bridge_dev));
+                       dev_err(ds->dev, "p%d: hw VLAN %d already used by %s\n",
+                               port, vlan.vid,
+                               netdev_name(ds->ports[i].bridge_dev));
                        err = -EOPNOTSUPP;
                        goto unlock;
                }
@@ -1274,7 +1253,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
 }
 
 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
-                                   u16 vid, bool untagged)
+                                   u16 vid, u8 member)
 {
        struct mv88e6xxx_vtu_entry vlan;
        int err;
@@ -1283,9 +1262,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       vlan.member[port] = untagged ?
-               GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
-               GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+       vlan.member[port] = member;
 
        return mv88e6xxx_vtu_loadpurge(chip, &vlan);
 }
@@ -1297,22 +1274,29 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
        struct mv88e6xxx_chip *chip = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       u8 member;
        u16 vid;
 
        if (!chip->info->max_vid)
                return;
 
+       if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+               member = GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+       else if (untagged)
+               member = GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED;
+       else
+               member = GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
        mutex_lock(&chip->reg_lock);
 
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
-               if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged))
-                       netdev_err(ds->ports[port].netdev,
-                                  "failed to add VLAN %d%c\n",
-                                  vid, untagged ? 'u' : 't');
+               if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
+                       dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+                               vid, untagged ? 'u' : 't');
 
        if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
-               netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
-                          vlan->vid_end);
+               dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
+                       vlan->vid_end);
 
        mutex_unlock(&chip->reg_lock);
 }
@@ -1320,7 +1304,6 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
                                    int port, u16 vid)
 {
-       struct dsa_switch *ds = chip->ds;
        struct mv88e6xxx_vtu_entry vlan;
        int i, err;
 
@@ -1337,9 +1320,6 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
        /* keep the VLAN unless all ports are excluded */
        vlan.valid = false;
        for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-               if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
-                       continue;
-
                if (vlan.member[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
                        vlan.valid = true;
                        break;
@@ -1450,7 +1430,8 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
        mutex_lock(&chip->reg_lock);
        if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid,
                                         GLOBAL_ATU_DATA_STATE_UC_STATIC))
-               netdev_err(ds->ports[port].netdev, "failed to load unicast MAC address\n");
+               dev_err(ds->dev, "p%d: failed to load unicast MAC address\n",
+                       port);
        mutex_unlock(&chip->reg_lock);
 }
 
@@ -1695,8 +1676,7 @@ static int mv88e6xxx_disable_ports(struct mv88e6xxx_chip *chip)
 
        /* Set all ports to the Disabled state */
        for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
-               err = mv88e6xxx_port_set_state(chip, i,
-                                              PORT_CONTROL_STATE_DISABLED);
+               err = mv88e6xxx_port_set_state(chip, i, BR_STATE_DISABLED);
                if (err)
                        return err;
        }
@@ -1723,8 +1703,8 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
 }
 
 static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
-                                  enum mv88e6xxx_frame_mode frame, u16 egress,
-                                  u16 etype)
+                                  enum mv88e6xxx_frame_mode frame,
+                                  enum mv88e6xxx_egress_mode egress, u16 etype)
 {
        int err;
 
@@ -1748,14 +1728,14 @@ static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
 static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
 {
        return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
-                                      PORT_CONTROL_EGRESS_UNMODIFIED,
+                                      MV88E6XXX_EGRESS_MODE_UNMODIFIED,
                                       PORT_ETH_TYPE_DEFAULT);
 }
 
 static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
 {
        return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
-                                      PORT_CONTROL_EGRESS_UNMODIFIED,
+                                      MV88E6XXX_EGRESS_MODE_UNMODIFIED,
                                       PORT_ETH_TYPE_DEFAULT);
 }
 
@@ -1763,7 +1743,8 @@ static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
 {
        return mv88e6xxx_set_port_mode(chip, port,
                                       MV88E6XXX_FRAME_MODE_ETHERTYPE,
-                                      PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
+                                      MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+                                      ETH_P_EDSA);
 }
 
 static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
@@ -1895,8 +1876,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       if (chip->info->ops->port_jumbo_config) {
-               err = chip->info->ops->port_jumbo_config(chip, port);
+       if (chip->info->ops->port_set_jumbo_size) {
+               err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
                if (err)
                        return err;
        }
@@ -1920,8 +1901,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       if (chip->info->ops->port_pause_config) {
-               err = chip->info->ops->port_pause_config(chip, port);
+       if (chip->info->ops->port_pause_limit) {
+               err = chip->info->ops->port_pause_limit(chip, port, 0, 0);
                if (err)
                        return err;
        }
@@ -2034,14 +2015,14 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
        u32 upstream_port = dsa_upstream_port(ds);
        int err;
 
-       if (chip->info->ops->g1_set_cpu_port) {
-               err = chip->info->ops->g1_set_cpu_port(chip, upstream_port);
+       if (chip->info->ops->set_cpu_port) {
+               err = chip->info->ops->set_cpu_port(chip, upstream_port);
                if (err)
                        return err;
        }
 
-       if (chip->info->ops->g1_set_egress_port) {
-               err = chip->info->ops->g1_set_egress_port(chip, upstream_port);
+       if (chip->info->ops->set_egress_port) {
+               err = chip->info->ops->set_egress_port(chip, upstream_port);
                if (err)
                        return err;
        }
@@ -2381,15 +2362,15 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2434,17 +2415,17 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2468,8 +2449,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2490,15 +2471,15 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
        .port_set_egress_floods = mv88e6185_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_set_upstream_port = mv88e6095_port_set_upstream_port,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2523,17 +2504,17 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2553,17 +2534,17 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2585,8 +2566,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2607,17 +2588,17 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2640,17 +2621,17 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2672,17 +2653,17 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2705,17 +2686,17 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2740,8 +2721,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2766,7 +2747,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -2774,8 +2755,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2799,7 +2780,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -2807,8 +2788,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2832,7 +2813,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -2840,8 +2821,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2865,17 +2846,17 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2899,7 +2880,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_set_cmode = mv88e6390x_port_set_cmode,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -2908,8 +2889,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -2932,17 +2913,17 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6320_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6185_g1_vtu_getnext,
@@ -2963,17 +2944,17 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6320_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6185_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -2994,17 +2975,17 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3025,17 +3006,17 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3056,17 +3037,17 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3089,17 +3070,17 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
+       .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
-       .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .set_cpu_port = mv88e6095_g1_set_cpu_port,
+       .set_egress_port = mv88e6095_g1_set_egress_port,
        .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3123,9 +3104,9 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_set_cmode = mv88e6390x_port_set_cmode,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -3134,8 +3115,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3159,9 +3140,9 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6390_port_pause_config,
+       .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -3169,8 +3150,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .set_egress_port = mv88e6390_g1_set_egress_port,
        .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
@@ -3792,7 +3773,8 @@ static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
        mutex_lock(&chip->reg_lock);
        if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
                                         GLOBAL_ATU_DATA_STATE_MC_STATIC))
-               netdev_err(ds->ports[port].netdev, "failed to load multicast MAC address\n");
+               dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
+                       port);
        mutex_unlock(&chip->reg_lock);
 }
 
index 98c24af977fd674710bb59a22878781efde3c0c8..d70873498501f5918d9a89186761546bb8d281fe 100644 (file)
 #define MV88E6XXX_MAX_PVT_SWITCHES     32
 #define MV88E6XXX_MAX_PVT_PORTS                16
 
+enum mv88e6xxx_egress_mode {
+       MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+       MV88E6XXX_EGRESS_MODE_UNTAGGED,
+       MV88E6XXX_EGRESS_MODE_TAGGED,
+       MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+};
+
 enum mv88e6xxx_frame_mode {
        MV88E6XXX_FRAME_MODE_NORMAL,
        MV88E6XXX_FRAME_MODE_DSA,
@@ -415,10 +422,12 @@ struct mv88e6xxx_ops {
                                      bool unicast, bool multicast);
        int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
                                   u16 etype);
-       int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
+       int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
+                                  size_t size);
 
        int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
-       int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+       int (*port_pause_limit)(struct mv88e6xxx_chip *chip, int port, u8 in,
+                               u8 out);
        int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
        int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
 
@@ -449,8 +458,8 @@ struct mv88e6xxx_ops {
        void (*stats_get_strings)(struct mv88e6xxx_chip *chip,  uint8_t *data);
        void (*stats_get_stats)(struct mv88e6xxx_chip *chip,  int port,
                                uint64_t *data);
-       int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
-       int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+       int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
+       int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
        const struct mv88e6xxx_irq_ops *watchdog_ops;
 
        /* Can be either in g1 or g2, so don't use a prefix */
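
For orientation, a minimal sketch of how a caller might drive the reworked ops above. The helper below is hypothetical (its name and the 10240-byte default are illustrative assumptions, not part of this series), and it assumes the driver's usual layout where the ops table is reached through chip->info->ops:

/* Hypothetical helper, not part of this series: shows the reworked ops
 * taking explicit parameters instead of hard-coding register values in
 * each per-chip implementation.
 */
static int example_port_defaults(struct mv88e6xxx_chip *chip, int port)
{
	int err;

	if (chip->info->ops->port_set_jumbo_size) {
		/* allow frames up to 10240 bytes */
		err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
		if (err)
			return err;
	}

	if (chip->info->ops->port_pause_limit) {
		/* in = out = 0 disables pause-frame limiting */
		err = chip->info->ops->port_pause_limit(chip, port, 0, 0);
		if (err)
			return err;
	}

	return 0;
}

Passing sizes and limits explicitly is what lets the per-chip implementations stop hard-coding register values, as the port.c hunks further down show.
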
index 0db624f0993c34149d61d7a3f05072aaa3a0f8cb..3500ac0ea8489e3659dbe4eed36123de735bbd1e 100644 (file)
@@ -62,7 +62,7 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
 
 static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
 {
-       return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
+       return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
 }
 
 static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
@@ -72,7 +72,8 @@ static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
        /* Restore PHY page Copper 0x0 for access via the registered
         * MDIO bus
         */
-       err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER);
+       err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE,
+                                 MV88E6XXX_PHY_PAGE_COPPER);
        if (unlikely(err)) {
                dev_err(chip->dev,
                        "failed to restore PHY %d page Copper (%d)\n",
@@ -86,7 +87,7 @@ int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
        int err;
 
        /* There is no paging for register 22 */
-       if (reg == PHY_PAGE)
+       if (reg == MV88E6XXX_PHY_PAGE)
                return -EINVAL;
 
        err = mv88e6xxx_phy_page_get(chip, phy, page);
@@ -104,12 +105,12 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
        int err;
 
        /* There is no paging for register 22 */
-       if (reg == PHY_PAGE)
+       if (reg == MV88E6XXX_PHY_PAGE)
                return -EINVAL;
 
        err = mv88e6xxx_phy_page_get(chip, phy, page);
        if (!err) {
-               err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
+               err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
                mv88e6xxx_phy_page_put(chip, phy);
        }
 
index 4131a4e8206a3437dff57c1710e7d7be0deef7bd..556b74a0502a776538adf091e336bc77e5f49b72 100644 (file)
@@ -14,8 +14,8 @@
 #ifndef _MV88E6XXX_PHY_H
 #define _MV88E6XXX_PHY_H
 
-#define PHY_PAGE               0x16
-#define PHY_PAGE_COPPER                0x00
+#define MV88E6XXX_PHY_PAGE             0x16
+#define MV88E6XXX_PHY_PAGE_COPPER      0x00
 
 /* PHY Registers accesses implementations */
 int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
index 3719ece60c615a16d5abbbeedf608822a1fe876a..efeb8d6b02e636142a7fac504fe60500f0694136 100644 (file)
@@ -12,6 +12,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/if_bridge.h>
 #include <linux/phy.h>
 
 #include "chip.h"
@@ -76,9 +77,9 @@ static int mv88e6xxx_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "delay RXCLK %s, TXCLK %s\n",
-                  reg & PORT_PCS_CTRL_RGMII_DELAY_RXCLK ? "yes" : "no",
-                  reg & PORT_PCS_CTRL_RGMII_DELAY_TXCLK ? "yes" : "no");
+       dev_dbg(chip->dev, "p%d: delay RXCLK %s, TXCLK %s\n", port,
+               reg & PORT_PCS_CTRL_RGMII_DELAY_RXCLK ? "yes" : "no",
+               reg & PORT_PCS_CTRL_RGMII_DELAY_TXCLK ? "yes" : "no");
 
        return 0;
 }
@@ -130,9 +131,9 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "%s link %s\n",
-                  reg & PORT_PCS_CTRL_FORCE_LINK ? "Force" : "Unforce",
-                  reg & PORT_PCS_CTRL_LINK_UP ? "up" : "down");
+       dev_dbg(chip->dev, "p%d: %s link %s\n", port,
+               reg & PORT_PCS_CTRL_FORCE_LINK ? "Force" : "Unforce",
+               reg & PORT_PCS_CTRL_LINK_UP ? "up" : "down");
 
        return 0;
 }
@@ -166,9 +167,9 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "%s %s duplex\n",
-                  reg & PORT_PCS_CTRL_FORCE_DUPLEX ? "Force" : "Unforce",
-                  reg & PORT_PCS_CTRL_DUPLEX_FULL ? "full" : "half");
+       dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+               reg & PORT_PCS_CTRL_FORCE_DUPLEX ? "Force" : "Unforce",
+               reg & PORT_PCS_CTRL_DUPLEX_FULL ? "full" : "half");
 
        return 0;
 }
@@ -226,10 +227,9 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
                return err;
 
        if (speed)
-               netdev_dbg(chip->ds->ports[port].netdev,
-                          "Speed set to %d Mbps\n", speed);
+               dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
        else
-               netdev_dbg(chip->ds->ports[port].netdev, "Speed unforced\n");
+               dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
 
        return 0;
 }
@@ -376,22 +376,24 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
  * the remote end or the period of time that this port can pause the
  * remote end.
  */
-int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+                              u8 out)
 {
-       return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+       return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, out << 8 | in);
 }
 
-int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+                              u8 out)
 {
        int err;
 
        err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
-                                  PORT_FLOW_CTRL_LIMIT_IN | 0);
+                                  PORT_FLOW_CTRL_LIMIT_IN | in);
        if (err)
                return err;
 
        return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
-                                   PORT_FLOW_CTRL_LIMIT_OUT | 0);
+                                   PORT_FLOW_CTRL_LIMIT_OUT | out);
 }
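
As a quick cross-check (an observation, not code from this series), the 6097-family encoding above places the egress limit in the upper byte and the ingress limit in the lower byte, so the behaviour of the old hard-coded write of 0x0000 is preserved by calling the new helper with both limits set to zero:

	/* Illustrative call: (0 << 8) | 0 == 0x0000 is written to
	 * PORT_PAUSE_CTRL, matching what port_pause_config() used to do.
	 */
	err = mv88e6097_port_pause_limit(chip, port, 0, 0);
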
 
 /* Offset 0x04: Port Control Register */
@@ -413,20 +415,39 @@ int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state)
                return err;
 
        reg &= ~PORT_CONTROL_STATE_MASK;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               state = PORT_CONTROL_STATE_DISABLED;
+               break;
+       case BR_STATE_BLOCKING:
+       case BR_STATE_LISTENING:
+               state = PORT_CONTROL_STATE_BLOCKING;
+               break;
+       case BR_STATE_LEARNING:
+               state = PORT_CONTROL_STATE_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+               state = PORT_CONTROL_STATE_FORWARDING;
+               break;
+       default:
+               return -EINVAL;
+       }
+
        reg |= state;
 
        err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "PortState set to %s\n",
-                  mv88e6xxx_port_state_names[state]);
+       dev_dbg(chip->dev, "p%d: PortState set to %s\n", port,
+               mv88e6xxx_port_state_names[state]);
 
        return 0;
 }
 
 int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
-                                  u16 mode)
+                                  enum mv88e6xxx_egress_mode mode)
 {
        int err;
        u16 reg;
@@ -436,7 +457,23 @@ int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
                return err;
 
        reg &= ~PORT_CONTROL_EGRESS_MASK;
-       reg |= mode;
+
+       switch (mode) {
+       case MV88E6XXX_EGRESS_MODE_UNMODIFIED:
+               reg |= PORT_CONTROL_EGRESS_UNMODIFIED;
+               break;
+       case MV88E6XXX_EGRESS_MODE_UNTAGGED:
+               reg |= PORT_CONTROL_EGRESS_UNTAGGED;
+               break;
+       case MV88E6XXX_EGRESS_MODE_TAGGED:
+               reg |= PORT_CONTROL_EGRESS_TAGGED;
+               break;
+       case MV88E6XXX_EGRESS_MODE_ETHERTYPE:
+               reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+               break;
+       default:
+               return -EINVAL;
+       }
 
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
 }
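
Callers now pass the abstract egress mode rather than raw PORT_CONTROL bits; a hedged usage sketch (the choice of UNMODIFIED here is purely illustrative):

	/* e.g. leave frames untouched on egress for this port */
	err = mv88e6xxx_port_set_egress_mode(chip, port,
					     MV88E6XXX_EGRESS_MODE_UNMODIFIED);
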
@@ -580,8 +617,7 @@ int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "VLANTable set to %.3x\n",
-                  map);
+       dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map);
 
        return 0;
 }
@@ -646,7 +682,7 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid)
                        return err;
        }
 
-       netdev_dbg(chip->ds->ports[port].netdev, "FID set to %u\n", fid);
+       dev_dbg(chip->dev, "p%d: FID set to %u\n", port, fid);
 
        return 0;
 }
@@ -683,8 +719,7 @@ int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid)
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "DefaultVID set to %u\n",
-                  pvid);
+       dev_dbg(chip->dev, "p%d: DefaultVID set to %u\n", port, pvid);
 
        return 0;
 }
@@ -761,8 +796,8 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       netdev_dbg(chip->ds->ports[port].netdev, "802.1QMode set to %s\n",
-                  mv88e6xxx_port_8021q_mode_names[mode]);
+       dev_dbg(chip->dev, "p%d: 802.1QMode set to %s\n", port,
+               mv88e6xxx_port_8021q_mode_names[mode]);
 
        return 0;
 }
@@ -781,7 +816,8 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
 }
 
-int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+                                 size_t size)
 {
        u16 reg;
        int err;
@@ -790,7 +826,16 @@ int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       reg |= PORT_CONTROL_2_JUMBO_10240;
+       reg &= ~PORT_CONTROL_2_JUMBO_MASK;
+
+       if (size <= 1522)
+               reg |= PORT_CONTROL_2_JUMBO_1522;
+       else if (size <= 2048)
+               reg |= PORT_CONTROL_2_JUMBO_2048;
+       else if (size <= 10240)
+               reg |= PORT_CONTROL_2_JUMBO_10240;
+       else
+               return -ERANGE;
 
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
 }
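
A hedged sketch of how a caller could map an MTU onto the frame size this op expects; the wrapper, its name and the overhead accounting are illustrative assumptions, not something this series adds:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hypothetical wrapper: account for Ethernet overhead (header, one VLAN
 * tag, FCS) and let the op pick the smallest jumbo bucket that fits.
 * mv88e6165_port_set_jumbo_size() returns -ERANGE above 10240 bytes.
 */
static int example_port_change_mtu(struct mv88e6xxx_chip *chip, int port,
				   int new_mtu)
{
	size_t frame_size = new_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;

	return mv88e6165_port_set_jumbo_size(chip, port, frame_size);
}
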
index 4f5e1ccfadc65da8dadab9c3ee7d9cce803cbcf8..8a59cabc20fab497fbea23024cf0d744d81770c2 100644 (file)
 #define PORT_CONTROL_2_VTU_PRI_OVERRIDE        BIT(14)
 #define PORT_CONTROL_2_SA_PRIO_OVERRIDE        BIT(13)
 #define PORT_CONTROL_2_DA_PRIO_OVERRIDE        BIT(12)
+#define PORT_CONTROL_2_JUMBO_MASK      (0x03 << 12)
 #define PORT_CONTROL_2_JUMBO_1522      (0x00 << 12)
 #define PORT_CONTROL_2_JUMBO_2048      (0x01 << 12)
 #define PORT_CONTROL_2_JUMBO_10240     (0x02 << 12)
@@ -212,7 +213,7 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
 int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
 int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
 int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
-                                  u16 mode);
+                                  enum mv88e6xxx_egress_mode mode);
 int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
                                  enum mv88e6xxx_frame_mode mode);
 int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
@@ -225,11 +226,14 @@ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
                                  u16 etype);
 int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
                                    bool message_port);
-int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+                                 size_t size);
 int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
 int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
-int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+                              u8 out);
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+                              u8 out);
 int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                              phy_interface_t mode);
 int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
index 5a2ad9c5faab16a8206bde14a7fb28f079e0ec15..a934bd5d05075308f6087c5a4aaa0e099251fda3 100644 (file)
@@ -1846,7 +1846,8 @@ static void xgbe_poll_controller(struct net_device *netdev)
 }
 #endif /* End CONFIG_NET_POLL_CONTROLLER */
 
-static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+                        __be16 proto,
                         struct tc_to_netdev *tc_to_netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
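
The setup_tc changes in this merge all follow the same shape: the ndo gains a chain_index argument, and drivers that can only offload the default chain reject anything else (as the cxgb4, ixgbe, mlx5e and mlxsw hunks below do). A driver-agnostic, purely illustrative sketch of that pattern:

/* Illustrative only; names are made up and no real driver is implied. */
static int example_setup_tc(struct net_device *dev, u32 handle,
			    u32 chain_index, __be16 proto,
			    struct tc_to_netdev *tc)
{
	/* Only the default chain (index 0) is offloaded; everything else
	 * stays in software.
	 */
	if (chain_index)
		return -EOPNOTSUPP;

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* device-specific mqprio programming would go here */
	return 0;
}
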
index 5f49334dcad5a8c8602cc3aa2e8795b2d489bb43..ef734675885e72fd61013d663940f25fa95d44c5 100644 (file)
@@ -4273,8 +4273,8 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
        return 0;
 }
 
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                    struct tc_to_netdev *tc)
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                    __be16 proto, struct tc_to_netdev *tc)
 {
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
index 243cb9748d35da488463f60d0e59a035a4330333..c26688d2f32663f348b5c21dff1931f07b7487c8 100644 (file)
@@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /* setup_tc callback */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                    struct tc_to_netdev *tc);
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                    __be16 proto, struct tc_to_netdev *tc);
 
 int bnx2x_get_vf_config(struct net_device *dev, int vf,
                        struct ifla_vf_info *ivi);
index c1cd72a5eccf80d8ab171681d24f608bd6dace1b..11e8a866a31232e123567fc539b4a1ae45a7cfad 100644 (file)
@@ -7103,8 +7103,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
        return 0;
 }
 
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                        struct tc_to_netdev *ntc)
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                        __be16 proto, struct tc_to_netdev *ntc)
 {
        if (ntc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
index 2e253061460b76e8e3136586926b97f532566e52..53856af07d46396ddc3ee2d5dbe73fe792f70cb8 100644 (file)
@@ -700,6 +700,13 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
        lio->msg_enable = msglvl;
 }
 
+static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+       struct lio *lio = GET_LIO(netdev);
+
+       lio->msg_enable = msglvl;
+}
+
 static void
 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 {
@@ -2611,7 +2618,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
        .get_regs_len           = lio_get_regs_len,
        .get_regs               = lio_get_regs,
        .get_msglevel           = lio_get_msglevel,
-       .set_msglevel           = lio_set_msglevel,
+       .set_msglevel           = lio_vf_set_msglevel,
        .get_sset_count         = lio_vf_get_sset_count,
        .get_coalesce           = lio_get_intr_coalesce,
        .set_coalesce           = lio_set_intr_coalesce,
index 07124096db48b8bb399155003f7e23a8ff5098c4..1f7032614ae589ba9d2c6b7134af909e6b398cba 100644 (file)
@@ -2997,10 +2997,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
-               if ((debug != -1) && (debug & NETIF_MSG_HW))
-                       liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
-                                            0);
-
                if (setup_link_status_change_wq(netdev))
                        goto setup_nic_dev_fail;
 
index 91685bf21878c62d72e8ab9e3d37e80ead2dc7d9..01c9710fc62e683f883fdab58526b6c66fb7c153 100644 (file)
@@ -2823,12 +2823,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
        return err;
 }
 
-static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                        struct tc_to_netdev *tc)
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                        __be16 proto, struct tc_to_netdev *tc)
 {
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        if (!(adap->flags & FULL_INIT_DONE)) {
                dev_err(adap->pdev_dev,
                        "Failed to setup tc on port %d. Link Down?\n",
@@ -5166,13 +5169,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                              &v, &port_vec);
        if (err < 0) {
                dev_err(adapter->pdev_dev, "Could not fetch port params\n");
-               goto free_adapter;
+               goto free_mbox_log;
        }
 
        adapter->params.nports = hweight32(port_vec);
        pci_set_drvdata(pdev, adapter);
        return 0;
 
+free_mbox_log:
+       kfree(adapter->mbox_log);
  free_adapter:
        kfree(adapter);
  free_pci_region:
@@ -5272,6 +5277,7 @@ static void remove_one(struct pci_dev *pdev)
                        unregister_netdev(adapter->port[0]);
                iounmap(adapter->regs);
                kfree(adapter->vfinfo);
+               kfree(adapter->mbox_log);
                kfree(adapter);
                pci_disable_sriov(pdev);
                pci_release_regions(pdev);
@@ -5318,6 +5324,7 @@ static void shutdown_one(struct pci_dev *pdev)
                        unregister_netdev(adapter->port[0]);
                iounmap(adapter->regs);
                kfree(adapter->vfinfo);
+               kfree(adapter->mbox_log);
                kfree(adapter);
                pci_disable_sriov(pdev);
                pci_release_regions(pdev);
index 4618185d6bc20822e6b1cf377f44579e4a57c201..16af646a7fe426fd9a0827f3a8751bef423a63eb 100644 (file)
@@ -4040,6 +4040,7 @@ static void cim_intr_handler(struct adapter *adapter)
                { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
                { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
                { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+               { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
                { 0 }
        };
        static const struct intr_info cim_upintr_info[] = {
@@ -4074,11 +4075,27 @@ static void cim_intr_handler(struct adapter *adapter)
                { 0 }
        };
 
+       u32 val, fw_err;
        int fat;
 
-       if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
+       fw_err = t4_read_reg(adapter, PCIE_FW_A);
+       if (fw_err & PCIE_FW_ERR_F)
                t4_report_fw_error(adapter);
 
+       /* When the Firmware detects an internal error which normally
+        * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
+        * in order to make sure the Host sees the Firmware Crash.  So
+        * if we have a Timer0 interrupt and don't see a Firmware Crash,
+        * ignore the Timer0 interrupt.
+        */
+
+       val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
+       if (val & TIMER0INT_F)
+               if (!(fw_err & PCIE_FW_ERR_F) ||
+                   (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
+                       t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
+                                    TIMER0INT_F);
+
        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
                                    cim_intr_info) +
              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
@@ -7688,10 +7705,9 @@ int t4_shutdown_adapter(struct adapter *adapter)
        t4_intr_disable(adapter);
        t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
        for_each_port(adapter, port) {
-               u32 a_port_cfg = PORT_REG(port,
-                                         is_t4(adapter->params.chip)
-                                         ? XGMAC_PORT_CFG_A
-                                         : MAC_PORT_CFG_A);
+               u32 a_port_cfg = is_t4(adapter->params.chip) ?
+                                      PORT_REG(port, XGMAC_PORT_CFG_A) :
+                                      T5_PORT_REG(port, MAC_PORT_CFG_A);
 
                t4_write_reg(adapter, a_port_cfg,
                             t4_read_reg(adapter, a_port_cfg)
index 3348d33c36faca900e92bca25607ac79b0246908..3884336ce23ca79c2730e3c294601af092e0afaa 100644 (file)
 #define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
 #define TIEQINPARERRINT_F    TIEQINPARERRINT_V(1U)
 
+#define TIMER0INT_S    2
+#define TIMER0INT_V(x) ((x) << TIMER0INT_S)
+#define TIMER0INT_F    TIMER0INT_V(1U)
+
 #define PREFDROPINT_S    1
 #define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
 #define PREFDROPINT_F    PREFDROPINT_V(1U)
index c65c33c03bcbba027ad32af7bc20c5f8d8412fd8..f47461aa658b482139bbec350cbf9f5ffe6498fc 100644 (file)
@@ -3088,6 +3088,10 @@ struct fw_debug_cmd {
 #define FW_DEBUG_CMD_TYPE_G(x) \
        (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
 
+enum pcie_fw_eval {
+       PCIE_FW_EVAL_CRASH = 0,
+};
+
 #define PCIE_FW_ERR_S          31
 #define PCIE_FW_ERR_V(x)       ((x) << PCIE_FW_ERR_S)
 #define PCIE_FW_ERR_F          PCIE_FW_ERR_V(1U)
index 9a520e4f0df9a0d47b75f71f01557414ba3d4eab..a5501af6db991fdd49438f905902f2a4eb65e7e5 100644 (file)
@@ -342,8 +342,8 @@ static void dpaa_get_stats64(struct net_device *net_dev,
        }
 }
 
-static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
-                        struct tc_to_netdev *tc)
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle,
+                        u32 chain_index, __be16 proto, struct tc_to_netdev *tc)
 {
        struct dpaa_priv *priv = netdev_priv(net_dev);
        u8 num_tc;
index 297fd196c8793d3ed7e00afb96adea6fccd7db83..a6e323f1563745c3a0cedd91d240e05af6606f33 100644 (file)
@@ -2379,6 +2379,10 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
 {
 }
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
 #endif /* !defined(CONFIG_M5272) */
 
 /* ITR clock source is enet system clock (clk_ahb).
index 5b88cc690c22c1ce8e123cdb3c0d23908414f8df..35865d05fccd3709fad96d41fa3a6b83edd213f8 100644 (file)
@@ -276,7 +276,7 @@ static int genmii_read_link(struct mii_phy *phy)
 }
 
 /* Generic implementation for most 10/100/1000 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
        .poll_link      = genmii_poll_link,
@@ -340,7 +340,7 @@ static int cis8201_init(struct mii_phy *phy)
        return 0;
 }
 
-static struct mii_phy_ops cis8201_phy_ops = {
+static const struct mii_phy_ops cis8201_phy_ops = {
        .init           = cis8201_init,
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
@@ -420,7 +420,7 @@ static int et1011c_init(struct mii_phy *phy)
        return 0;
 }
 
-static struct mii_phy_ops et1011c_phy_ops = {
+static const struct mii_phy_ops et1011c_phy_ops = {
        .init           = et1011c_init,
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
@@ -439,7 +439,7 @@ static struct mii_phy_def et1011c_phy_def = {
 
 
 
-static struct mii_phy_ops m88e1111_phy_ops = {
+static const struct mii_phy_ops m88e1111_phy_ops = {
        .init           = m88e1111_init,
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
@@ -455,7 +455,7 @@ static struct mii_phy_def m88e1111_phy_def = {
        .ops            = &m88e1111_phy_ops,
 };
 
-static struct mii_phy_ops m88e1112_phy_ops = {
+static const struct mii_phy_ops m88e1112_phy_ops = {
        .init           = m88e1112_init,
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
@@ -480,7 +480,7 @@ static int ar8035_init(struct mii_phy *phy)
        return 0;
 }
 
-static struct mii_phy_ops ar8035_phy_ops = {
+static const struct mii_phy_ops ar8035_phy_ops = {
        .init           = ar8035_init,
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
index e1d46c11cb61b1920fac6287d2eda6f115715bf3..2dcb5463d9b86176e5d915a5e602f52cab43ae1f 100644 (file)
@@ -6640,12 +6640,17 @@ static int e1000e_pm_thaw(struct device *dev)
 static int e1000e_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+       int rc;
 
        e1000e_flush_lpic(pdev);
 
        e1000e_pm_freeze(dev);
 
-       return __e1000_shutdown(pdev, false);
+       rc = __e1000_shutdown(pdev, false);
+       if (rc)
+               e1000e_pm_thaw(dev);
+
+       return rc;
 }
 
 static int e1000e_pm_resume(struct device *dev)
index 24f2f6f86f5a3a50929eb52b5dbe6eb08e0e8635..5e37387c70825db8c1edc425c21478787e213a5e 100644 (file)
@@ -1265,8 +1265,8 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
        return err;
 }
 
-static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                           struct tc_to_netdev *tc)
+static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                           __be16 proto, struct tc_to_netdev *tc)
 {
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
index 5fef27ebfa52671b9dfea5bcf8a1e273f5678253..abab7fb7a3fcc0af654260e95b4b28637a1c481e 100644 (file)
@@ -5509,7 +5509,8 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
        return ret;
 }
 
-static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
+                          u32 chain_index, __be16 proto,
                           struct tc_to_netdev *tc)
 {
        if (tc->type != TC_SETUP_MQPRIO)
index 7e433344a13cbaf90c4c2c1c1bcbaf13376d6e60..ec62410b035a1ba9678e405ab4ddb53f060901b6 100644 (file)
@@ -6469,8 +6469,8 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter)
        igb_rar_set_index(adapter, 0);
 }
 
-int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
-                      const u8 queue)
+static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+                             const u8 queue)
 {
        struct e1000_hw *hw = &adapter->hw;
        int rar_entries = hw->mac.rar_entry_count -
@@ -6499,8 +6499,8 @@ int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
        return -ENOSPC;
 }
 
-int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
-                      const u8 queue)
+static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+                             const u8 queue)
 {
        struct e1000_hw *hw = &adapter->hw;
        int rar_entries = hw->mac.rar_entry_count -
@@ -6552,8 +6552,8 @@ static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
        return 0;
 }
 
-int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
-                         const u32 info, const u8 *addr)
+static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
+                                const u32 info, const u8 *addr)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct vf_data_storage *vf_data = &adapter->vf_data[vf];
index 54463f03b3db50e6b5b0329c94a419ea6b2a7a9a..812319ab77db9612e91ea19583a6cb21a80cf4d5 100644 (file)
@@ -9200,11 +9200,14 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
        return err;
 }
 
-static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                           struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                           __be16 proto, struct tc_to_netdev *tc)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
            tc->type == TC_SETUP_CLSU32) {
                switch (tc->cls_u32->command) {
index 82436742ad75b011847cde250bc26f7a2d9e7175..c1de75fc399a31fc30d34b4a8ffe84a05baa7a85 100644 (file)
@@ -86,7 +86,8 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
        return 0;
 }
 
-static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
+                             u32 chain_index, __be16 proto,
                              struct tc_to_netdev *tc)
 {
        if (tc->type != TC_SETUP_MQPRIO)
index cdff04b2aea19de6c32779686c953a5caedd2cdf..5afec0f4a6588b8198603ff3f890a24e617ff53a 100644 (file)
@@ -2991,13 +2991,17 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
 }
 
 static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
-                             __be16 proto, struct tc_to_netdev *tc)
+                             u32 chain_index, __be16 proto,
+                             struct tc_to_netdev *tc)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
                goto mqprio;
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        switch (tc->type) {
        case TC_SETUP_CLSFLOWER:
                switch (tc->cls_flower->command) {
index 79462c0368a0781ca7a38354726ed628097c0c89..70c2b8d020bd6d45311c4469de058a0a9f581bf6 100644 (file)
@@ -652,7 +652,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 }
 
 static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
-                                 __be16 proto, struct tc_to_netdev *tc)
+                                 u32 chain_index, __be16 proto,
+                                 struct tc_to_netdev *tc)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
@@ -664,9 +665,13 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
                struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
 
                return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
+                                                           chain_index,
                                                            proto, tc);
        }
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        switch (tc->type) {
        case TC_SETUP_CLSFLOWER:
                switch (tc->cls_flower->command) {
index f60e2ba515d030b283047dc3ba49408816dd6444..0e51c3693243539de1b7bf973db78a036e0bbf99 100644 (file)
@@ -860,21 +860,13 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
-static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                                   u8 swid)
-{
-       char pspa_pl[MLXSW_REG_PSPA_LEN];
-
-       mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
-}
-
 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-       return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
-                                       swid);
+       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 }
 
 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
@@ -975,13 +967,14 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u8 module, u8 width, u8 lane)
 {
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i;
 
-       mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+       mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
        for (i = 0; i < width; i++) {
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
@@ -991,11 +984,12 @@ static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 }
 
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 
-       mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+       mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
 }
@@ -1699,11 +1693,15 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
-                            __be16 proto, struct tc_to_netdev *tc)
+                            u32 chain_index, __be16 proto,
+                            struct tc_to_netdev *tc)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        switch (tc->type) {
        case TC_SETUP_MATCHALL:
                switch (tc->cls_mall->command) {
@@ -1752,12 +1750,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
-       .ndo_fdb_add            = switchdev_port_fdb_add,
-       .ndo_fdb_del            = switchdev_port_fdb_del,
-       .ndo_fdb_dump           = switchdev_port_fdb_dump,
-       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
-       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
-       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
        .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
 };
 
@@ -2657,17 +2649,26 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                                 bool split, u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               bool split, u8 module, u8 width, u8 lane)
 {
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
        int err;
 
+       err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+                       local_port);
+               return err;
+       }
+
        dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
-       if (!dev)
-               return -ENOMEM;
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_alloc_etherdev;
+       }
        SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp_port->dev = dev;
@@ -2709,6 +2710,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
+       err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_module_map;
+       }
+
        err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
@@ -2831,6 +2839,8 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 err_dev_addr_init:
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 err_port_swid_set:
+       mlxsw_sp_port_module_unmap(mlxsw_sp_port);
+err_port_module_map:
        kfree(mlxsw_sp_port->hw_stats.cache);
 err_alloc_hw_stats:
        kfree(mlxsw_sp_port->sample);
@@ -2838,32 +2848,12 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
        free_netdev(dev);
-       return err;
-}
-
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               bool split, u8 module, u8 width, u8 lane)
-{
-       int err;
-
-       err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
-       if (err) {
-               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
-                       local_port);
-               return err;
-       }
-       err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
-                                    module, width, lane);
-       if (err)
-               goto err_port_create;
-       return 0;
-
-err_port_create:
+err_alloc_etherdev:
        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
        return err;
 }
 
-static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
 
@@ -2876,17 +2866,12 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-       mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
+       mlxsw_sp_port_module_unmap(mlxsw_sp_port);
        kfree(mlxsw_sp_port->hw_stats.cache);
        kfree(mlxsw_sp_port->sample);
        free_percpu(mlxsw_sp_port->pcpu_stats);
        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
        free_netdev(mlxsw_sp_port->dev);
-}
-
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
-       __mlxsw_sp_port_remove(mlxsw_sp, local_port);
        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
 }
 
@@ -2964,19 +2949,6 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        int err, i;
 
-       for (i = 0; i < count; i++) {
-               err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
-                                              width, i * width);
-               if (err)
-                       goto err_port_module_map;
-       }
-
-       for (i = 0; i < count; i++) {
-               err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
-               if (err)
-                       goto err_port_swid_set;
-       }
-
        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
                                           module, width, i * width);
@@ -2990,15 +2962,6 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
        for (i--; i >= 0; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
-       i = count;
-err_port_swid_set:
-       for (i--; i >= 0; i--)
-               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
-                                        MLXSW_PORT_SWID_DISABLED_PORT);
-       i = count;
-err_port_module_map:
-       for (i--; i >= 0; i--)
-               mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
        return err;
 }
 
@@ -3013,17 +2976,6 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
         */
        count = count / 2;
 
-       for (i = 0; i < count; i++) {
-               local_port = base_port + i * 2;
-               module = mlxsw_sp->port_to_module[local_port];
-
-               mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
-                                        0);
-       }
-
-       for (i = 0; i < count; i++)
-               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
-
        for (i = 0; i < count; i++) {
                local_port = base_port + i * 2;
                module = mlxsw_sp->port_to_module[local_port];
@@ -3705,7 +3657,7 @@ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
 }
 
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
 
index 4a7a39a9f1a13d3770d2b4fb05c1f7af3393c311..5ef98d4d0ab6c8d3fa36781a72cd954bea26fe59 100644 (file)
@@ -361,6 +361,7 @@ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
 
 /* spectrum_dcb.c */
 #ifdef CONFIG_MLXSW_SPECTRUM_DCB
index 20061058801ecee066a144e7c5cb0ab86e1a68c7..700cc8c6aa5be69c9c6135c0572259434143955e 100644 (file)
@@ -591,7 +591,7 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
 
 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
 {
index edcc273d7597677cb04a34612da9f1554a2b6200..cd89a3e6cd8184c1b80037b8a86e3dc3bc0e3a6d 100644 (file)
@@ -102,8 +102,6 @@ struct mlxsw_sp_bridge_vlan {
        struct list_head list;
        struct list_head port_vlan_list;
        u16 vid;
-       u8 egress_untagged:1,
-          pvid:1;
 };
 
 struct mlxsw_sp_bridge_ops {
@@ -456,6 +454,9 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
                mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
                                               &attr->u.brport_flags);
                break;
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+               attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -1000,8 +1001,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
                goto err_port_vlan_bridge_join;
 
        bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
-       bridge_vlan->egress_untagged = is_untagged;
-       bridge_vlan->pvid = is_pvid;
 
        return 0;
 
@@ -1142,44 +1141,40 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 }
 
 static int
-mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
-                            const struct switchdev_obj_port_fdb *fdb,
-                            struct switchdev_trans *trans)
+mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                     struct switchdev_notifier_fdb_info *fdb_info, bool adding)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct net_device *orig_dev = fdb->obj.orig_dev;
+       struct net_device *orig_dev = fdb_info->info.dev;
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        struct mlxsw_sp_bridge_device *bridge_device;
        struct mlxsw_sp_bridge_port *bridge_port;
        u16 fid_index, vid;
 
-       if (switchdev_trans_ph_prepare(trans))
-               return 0;
-
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
+       if (!bridge_port)
                return -EINVAL;
 
        bridge_device = bridge_port->bridge_device;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
                                                               bridge_device,
-                                                              fdb->vid);
+                                                              fdb_info->vid);
        if (!mlxsw_sp_port_vlan)
                return 0;
 
        fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
        vid = mlxsw_sp_port_vlan->vid;
 
-       if (!mlxsw_sp_port->lagged)
+       if (!bridge_port->lagged)
                return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
-                                              mlxsw_sp_port->local_port,
-                                              fdb->addr, fid_index, true,
-                                              false);
+                                              bridge_port->system_port,
+                                              fdb_info->addr, fid_index,
+                                              adding, false);
        else
                return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
-                                                  mlxsw_sp_port->lag_id,
-                                                  fdb->addr, fid_index, vid,
-                                                  true, false);
+                                                  bridge_port->lag_id,
+                                                  fdb_info->addr, fid_index,
+                                                  vid, adding, false);
 }
 
 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
@@ -1349,11 +1344,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
                                              SWITCHDEV_OBJ_PORT_VLAN(obj),
                                              trans);
                break;
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
-                                                  SWITCHDEV_OBJ_PORT_FDB(obj),
-                                                  trans);
-               break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
                                            SWITCHDEV_OBJ_PORT_MDB(obj),
@@ -1405,43 +1395,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 }
 
-static int
-mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
-                            const struct switchdev_obj_port_fdb *fdb)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct net_device *orig_dev = fdb->obj.orig_dev;
-       struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-       struct mlxsw_sp_bridge_device *bridge_device;
-       struct mlxsw_sp_bridge_port *bridge_port;
-       u16 fid_index, vid;
-
-       bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
-
-       bridge_device = bridge_port->bridge_device;
-       mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
-                                                              bridge_device,
-                                                              fdb->vid);
-       if (!mlxsw_sp_port_vlan)
-               return 0;
-
-       fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
-       vid = mlxsw_sp_port_vlan->vid;
-
-       if (!mlxsw_sp_port->lagged)
-               return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
-                                              mlxsw_sp_port->local_port,
-                                              fdb->addr, fid_index, false,
-                                              false);
-       else
-               return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
-                                                  mlxsw_sp_port->lag_id,
-                                                  fdb->addr, fid_index, vid,
-                                                  false, false);
-}
-
 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                 const struct switchdev_obj_port_mdb *mdb)
 {
@@ -1501,10 +1454,6 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
                err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
                                              SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
-                                                  SWITCHDEV_OBJ_PORT_FDB(obj));
-               break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
                                            SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -1534,161 +1483,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
        return NULL;
 }
 
-static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
-                                 struct switchdev_obj_port_fdb *fdb,
-                                 switchdev_obj_dump_cb_t *cb)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct net_device *orig_dev = fdb->obj.orig_dev;
-       struct mlxsw_sp_bridge_port *bridge_port;
-       u16 lag_id, fid_index;
-       char mac[ETH_ALEN];
-       int stored_err = 0;
-       char *sfd_pl;
-       u8 num_rec;
-       int err;
-
-       bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (!bridge_port)
-               return 0;
-
-       sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
-       if (!sfd_pl)
-               return -ENOMEM;
-
-       mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
-       do {
-               struct mlxsw_sp_port *tmp;
-               u8 local_port;
-               int i;
-
-               mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
-               err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-               if (err)
-                       goto out;
-
-               num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
-
-               /* Even in case of error, we have to run the dump to the end
-                * so the session in firmware is finished.
-                */
-               if (stored_err)
-                       continue;
-
-               for (i = 0; i < num_rec; i++) {
-                       switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
-                       case MLXSW_REG_SFD_REC_TYPE_UNICAST:
-                               mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac,
-                                                       &fid_index,
-                                                       &local_port);
-                               if (bridge_port->lagged)
-                                       continue;
-                               if (bridge_port->system_port != local_port)
-                                       continue;
-                               if (bridge_port->bridge_device->vlan_enabled)
-                                       fdb->vid = fid_index;
-                               else
-                                       fdb->vid = 0;
-                               ether_addr_copy(fdb->addr, mac);
-                               fdb->ndm_state = NUD_REACHABLE;
-                               err = cb(&fdb->obj);
-                               if (err)
-                                       stored_err = err;
-                               break;
-                       case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
-                               mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
-                                                           mac, &fid_index,
-                                                           &lag_id);
-                               if (!bridge_port->lagged)
-                                       continue;
-                               if (bridge_port->lag_id != lag_id)
-                                       continue;
-                               tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
-                               if (tmp->local_port !=
-                                   mlxsw_sp_port->local_port)
-                                       continue;
-                               if (bridge_port->bridge_device->vlan_enabled)
-                                       fdb->vid = fid_index;
-                               else
-                                       fdb->vid = 0;
-                               ether_addr_copy(fdb->addr, mac);
-                               fdb->ndm_state = NUD_REACHABLE;
-                               err = cb(&fdb->obj);
-                               if (err)
-                                       stored_err = err;
-                               break;
-                       }
-               }
-       } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
-
-out:
-       kfree(sfd_pl);
-       return stored_err ? stored_err : err;
-}
-
-static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
-                                  struct switchdev_obj_port_vlan *vlan,
-                                  switchdev_obj_dump_cb_t *cb)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct net_device *orig_dev = vlan->obj.orig_dev;
-       struct mlxsw_sp_bridge_port *bridge_port;
-       struct mlxsw_sp_bridge_vlan *bridge_vlan;
-       int err = 0;
-
-       bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-       if (WARN_ON(!bridge_port))
-               return -EINVAL;
-
-       if (!bridge_port->bridge_device->vlan_enabled)
-               return 0;
-
-       list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
-               vlan->flags = 0;
-               if (bridge_vlan->pvid)
-                       vlan->flags |= BRIDGE_VLAN_INFO_PVID;
-               if (bridge_vlan->egress_untagged)
-                       vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
-               vlan->vid_begin = bridge_vlan->vid;
-               vlan->vid_end = bridge_vlan->vid;
-               err = cb(&vlan->obj);
-               if (err)
-                       break;
-       }
-
-       return err;
-}
-
-static int mlxsw_sp_port_obj_dump(struct net_device *dev,
-                                 struct switchdev_obj *obj,
-                                 switchdev_obj_dump_cb_t *cb)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       int err = 0;
-
-       switch (obj->id) {
-       case SWITCHDEV_OBJ_ID_PORT_VLAN:
-               err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
-                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
-               break;
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
-                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
-               break;
-       default:
-               err = -EOPNOTSUPP;
-               break;
-       }
-
-       return err;
-}
-
 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
        .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
        .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
        .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
        .switchdev_port_obj_del         = mlxsw_sp_port_obj_del,
-       .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
 };
 
 static int
@@ -1857,19 +1656,16 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
 }
 
-static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
-                                       char *mac, u16 vid,
-                                       struct net_device *dev)
+static void
+mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
+                           const char *mac, u16 vid,
+                           struct net_device *dev)
 {
        struct switchdev_notifier_fdb_info info;
-       unsigned long notifier_type;
 
-       if (learning_sync) {
-               info.addr = mac;
-               info.vid = vid;
-               notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
-               call_switchdev_notifiers(notifier_type, dev, &info.info);
-       }
+       info.addr = mac;
+       info.vid = vid;
+       call_switchdev_notifiers(type, dev, &info.info);
 }
 
 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
@@ -1880,6 +1676,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_bridge_device *bridge_device;
        struct mlxsw_sp_bridge_port *bridge_port;
        struct mlxsw_sp_port *mlxsw_sp_port;
+       enum switchdev_notifier_type type;
        char mac[ETH_ALEN];
        u8 local_port;
        u16 vid, fid;
@@ -1918,8 +1715,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
-                                   adding, mac, vid, bridge_port->dev);
+       type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+       mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
        return;
 
 just_remove:
@@ -1936,6 +1734,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_bridge_device *bridge_device;
        struct mlxsw_sp_bridge_port *bridge_port;
        struct mlxsw_sp_port *mlxsw_sp_port;
+       enum switchdev_notifier_type type;
        char mac[ETH_ALEN];
        u16 lag_vid = 0;
        u16 lag_id;
@@ -1976,8 +1775,9 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
-                                   adding, mac, vid, bridge_port->dev);
+       type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+       mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
        return;
 
 just_remove:
@@ -2050,6 +1850,97 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
 }
 
+struct mlxsw_sp_switchdev_event_work {
+       struct work_struct work;
+       struct switchdev_notifier_fdb_info fdb_info;
+       struct net_device *dev;
+       unsigned long event;
+};
+
+static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+{
+       struct mlxsw_sp_switchdev_event_work *switchdev_work =
+               container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+       struct net_device *dev = switchdev_work->dev;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       int err;
+
+       rtnl_lock();
+       mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+       if (!mlxsw_sp_port)
+               goto out;
+
+       switch (switchdev_work->event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               fdb_info = &switchdev_work->fdb_info;
+               err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
+               if (err)
+                       break;
+               mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+                                           fdb_info->addr,
+                                           fdb_info->vid, dev);
+               break;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               fdb_info = &switchdev_work->fdb_info;
+               mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
+               break;
+       }
+
+out:
+       rtnl_unlock();
+       kfree(switchdev_work->fdb_info.addr);
+       kfree(switchdev_work);
+       dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
+                                   unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       struct mlxsw_sp_switchdev_event_work *switchdev_work;
+       struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+       if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+               return NOTIFY_DONE;
+
+       switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+       if (!switchdev_work)
+               return NOTIFY_BAD;
+
+       INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
+       switchdev_work->dev = dev;
+       switchdev_work->event = event;
+
+       switch (event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               memcpy(&switchdev_work->fdb_info, ptr,
+                      sizeof(switchdev_work->fdb_info));
+               switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+               ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+                               fdb_info->addr);
+               /* Take a reference on the device. This can be either an
+                * upper device containing the mlxsw_sp_port or the
+                * mlxsw_sp_port itself.
+                */
+               dev_hold(dev);
+               break;
+       default:
+               kfree(switchdev_work);
+               return NOTIFY_DONE;
+       }
+
+       mlxsw_core_schedule_work(&switchdev_work->work);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_notifier = {
+       .notifier_call = mlxsw_sp_switchdev_event,
+};
+
 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
@@ -2060,6 +1951,13 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
                return err;
        }
+
+       err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
+               return err;
+       }
+
        INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
        bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -2069,6 +1967,8 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 {
        cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
+       unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+
 }
 
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
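
The new spectrum_switchdev.c code above handles SWITCHDEV_FDB_ADD_TO_DEVICE/DEL_TO_DEVICE in atomic (RCU) context, so it copies the fdb_info, takes a reference on the netdevice and defers the actual FDB programming to a work item that runs under rtnl. Below is a minimal, self-contained sketch of that deferral pattern; the my_* identifiers are illustrative only (they do not exist in the driver), error handling is trimmed, and plain schedule_work() stands in for the driver's own workqueue helper.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

struct my_fdb_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
        unsigned long event;
};

static void my_fdb_work_fn(struct work_struct *work)
{
        struct my_fdb_work *w = container_of(work, struct my_fdb_work, work);

        rtnl_lock();
        /* program the hardware FDB from w->fdb_info here */
        rtnl_unlock();

        kfree(w->fdb_info.addr);
        dev_put(w->dev);        /* drop the reference taken in the notifier */
        kfree(w);
}

/* Runs under rcu_read_lock(): atomic allocations only, no sleeping. */
static int my_switchdev_event(struct notifier_block *nb,
                              unsigned long event, void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info = ptr;
        struct my_fdb_work *w;

        if (event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
            event != SWITCHDEV_FDB_DEL_TO_DEVICE)
                return NOTIFY_DONE;

        w = kzalloc(sizeof(*w), GFP_ATOMIC);
        if (!w)
                return NOTIFY_BAD;

        INIT_WORK(&w->work, my_fdb_work_fn);
        w->dev = dev;
        w->event = event;
        w->fdb_info = *fdb_info;
        w->fdb_info.addr = kmemdup(fdb_info->addr, ETH_ALEN, GFP_ATOMIC);
        if (!w->fdb_info.addr) {
                kfree(w);
                return NOTIFY_BAD;
        }

        dev_hold(dev);          /* released in the work function */
        schedule_work(&w->work);

        return NOTIFY_DONE;
}

static struct notifier_block my_switchdev_nb = {
        .notifier_call = my_switchdev_event,
};
/* registered at init time with register_switchdev_notifier(&my_switchdev_nb) */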
index de07517da1bd7b011377393826ffb05845769897..396b93f54823f9d44badfd682ebf7a21ba5de5a6 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 
 #include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
 #include "nfp_app.h"
 #include "nfp_main.h"
 
@@ -43,6 +44,13 @@ static const struct nfp_app_type *apps[] = {
        &app_bpf,
 };
 
+const char *nfp_app_mip_name(struct nfp_app *app)
+{
+       if (!app || !app->pf->mip)
+               return "";
+       return nfp_mip_name(app->pf->mip);
+}
+
 struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
 {
        struct sk_buff *skb;
index 3fbf68f8577c005f6ffbacfd923b6d6d0d932995..f5e373fa8c3bbc1a815e87854a9cdebb2c80f98c 100644 (file)
@@ -216,6 +216,7 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
        app->type->ctrl_msg_rx(app, skb);
 }
 
+const char *nfp_app_mip_name(struct nfp_app *app);
 struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
 
 struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
index 1a33ad9f4170c0f75e4af35e9a38902b620185a6..83c65e6291eee195beb5fb958bb8688bcb22d5dc 100644 (file)
@@ -80,7 +80,7 @@ int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
        if (err)
                return err < 0 ? err : 0;
 
-       nfp_net_get_mac_addr(nn, app->cpp, id);
+       nfp_net_get_mac_addr(app->pf, nn, id);
 
        return 0;
 }
index 0c2e64d217b5aa6f14725abb5faacb65a7e4ae55..4e59dcb78c3697ed12f9e7511a687ccddd4f91dc 100644 (file)
@@ -77,7 +77,7 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
 {
        int err;
 
-       pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err);
+       pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
        if (!err)
                return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
 
@@ -170,7 +170,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
                return NULL;
        }
 
-       fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
+       fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
        if (!fw_model) {
                dev_err(&pdev->dev, "Error: can't read part number\n");
                return NULL;
@@ -358,21 +358,26 @@ static int nfp_pci_probe(struct pci_dev *pdev,
                goto err_disable_msix;
        }
 
+       pf->hwinfo = nfp_hwinfo_read(pf->cpp);
+
        dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
-                nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"),
-                nfp_hwinfo_lookup(pf->cpp, "assembly.partno"),
-                nfp_hwinfo_lookup(pf->cpp, "assembly.serial"),
-                nfp_hwinfo_lookup(pf->cpp, "assembly.revision"),
-                nfp_hwinfo_lookup(pf->cpp, "cpld.version"));
+                nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"),
+                nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"),
+                nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
+                nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
+                nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));
 
        err = devlink_register(devlink, &pdev->dev);
        if (err)
-               goto err_cpp_free;
+               goto err_hwinfo_free;
 
        err = nfp_nsp_init(pdev, pf);
        if (err)
                goto err_devlink_unreg;
 
+       pf->mip = nfp_mip_open(pf->cpp);
+       pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
+
        err = nfp_pcie_sriov_read_nfd_limit(pf);
        if (err)
                goto err_fw_unload;
@@ -394,13 +399,16 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 err_sriov_unlimit:
        pci_sriov_set_totalvfs(pf->pdev, 0);
 err_fw_unload:
+       kfree(pf->rtbl);
+       nfp_mip_close(pf->mip);
        if (pf->fw_loaded)
                nfp_fw_unload(pf);
        kfree(pf->eth_tbl);
        kfree(pf->nspi);
 err_devlink_unreg:
        devlink_unregister(devlink);
-err_cpp_free:
+err_hwinfo_free:
+       kfree(pf->hwinfo);
        nfp_cpp_free(pf->cpp);
 err_disable_msix:
        pci_set_drvdata(pdev, NULL);
@@ -430,10 +438,13 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 
        devlink_unregister(devlink);
 
+       kfree(pf->rtbl);
+       nfp_mip_close(pf->mip);
        if (pf->fw_loaded)
                nfp_fw_unload(pf);
 
        pci_set_drvdata(pdev, NULL);
+       kfree(pf->hwinfo);
        nfp_cpp_free(pf->cpp);
 
        kfree(pf->eth_tbl);
index 37832853b0b39312359d344b65973c26bd041109..88724f8d0dcdd91ed8734d2d299b366f189b9afb 100644 (file)
@@ -54,8 +54,11 @@ struct pci_dev;
 struct nfp_cpp;
 struct nfp_cpp_area;
 struct nfp_eth_table;
+struct nfp_hwinfo;
+struct nfp_mip;
 struct nfp_net;
 struct nfp_nsp_identify;
+struct nfp_rtsym_table;
 
 /**
  * struct nfp_pf - NFP PF-specific device structure
@@ -70,6 +73,9 @@ struct nfp_nsp_identify;
  * @num_vfs:           Number of SR-IOV VFs enabled
  * @fw_loaded:         Is the firmware loaded?
  * @ctrl_vnic:         Pointer to the control vNIC if available
+ * @mip:               MIP handle
+ * @rtbl:              RTsym table
+ * @hwinfo:            HWInfo table
  * @eth_tbl:           NSP ETH table
  * @nspi:              NSP identification info
  * @hwmon_dev:         pointer to hwmon device
@@ -101,6 +107,9 @@ struct nfp_pf {
 
        struct nfp_net *ctrl_vnic;
 
+       const struct nfp_mip *mip;
+       struct nfp_rtsym_table *rtbl;
+       struct nfp_hwinfo *hwinfo;
        struct nfp_eth_table *eth_tbl;
        struct nfp_nsp_identify *nspi;
 
@@ -130,7 +139,7 @@ void nfp_hwmon_unregister(struct nfp_pf *pf);
 struct nfp_eth_table_port *
 nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
 void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id);
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id);
 
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
index 4f0df63de626484022c5cb4a5922385bd38051a8..49d1756d6a8e0e74b4045b58e6b34c546ad48cc1 100644 (file)
@@ -2994,11 +2994,14 @@ static void nfp_net_stat64(struct net_device *netdev,
 }
 
 static int
-nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-                struct tc_to_netdev *tc)
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+                __be16 proto, struct tc_to_netdev *tc)
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
+       if (chain_index)
+               return -EOPNOTSUPP;
+
        return nfp_app_setup_tc(nn->app, netdev, handle, proto, tc);
 }
 
index 83664ca2521382cde49fdaa1ec7d8969bd96a6c4..6e31355c3567ac62b32837044fcd918d41932f09 100644 (file)
@@ -166,10 +166,10 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
 
        nfp_net_get_nspinfo(nn->app, nsp_version);
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-                "%d.%d.%d.%d %s %s",
+                "%d.%d.%d.%d %s %s %s",
                 nn->fw_ver.resv, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor, nsp_version,
-                nfp_app_name(nn->app));
+                nfp_app_mip_name(nn->app), nfp_app_name(nn->app));
        strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
                sizeof(drvinfo->bus_info));
 
index 5f27703060c242b0b2d49f49dce0a72b49b48a50..bc2bc0886176e5a3bff856aa2037715ae14543f9 100644 (file)
 
 #define NFP_PF_CSR_SLICE_SIZE  (32 * 1024)
 
-static int nfp_is_ready(struct nfp_cpp *cpp)
+static int nfp_is_ready(struct nfp_pf *pf)
 {
        const char *cp;
        long state;
        int err;
 
-       cp = nfp_hwinfo_lookup(cpp, "board.state");
+       cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
        if (!cp)
                return 0;
 
@@ -134,15 +134,15 @@ static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
 
 /**
  * nfp_net_get_mac_addr() - Get the MAC address.
+ * @pf:       NFP PF handle
  * @nn:       NFP Network structure
- * @cpp:      NFP CPP handle
  * @id:              NFP port id
  *
  * First try to get the MAC address from NSP ETH table. If that
  * fails try HWInfo.  As a last resort generate a random address.
  */
 void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
 {
        struct nfp_eth_table_port *eth_port;
        struct nfp_net_dp *dp = &nn->dp;
@@ -159,7 +159,7 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
 
        snprintf(name, sizeof(name), "eth%d.mac", id);
 
-       mac_str = nfp_hwinfo_lookup(cpp, name);
+       mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
        if (!mac_str) {
                dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
                eth_hw_addr_random(dp->netdev);
@@ -201,7 +201,7 @@ nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
 
        snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
 
-       val = nfp_rtsym_read_le(pf->cpp, name, &err);
+       val = nfp_rtsym_read_le(pf->rtbl, name, &err);
        if (err) {
                if (err == -ENOENT)
                        return default_val;
@@ -234,7 +234,7 @@ nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
        snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
                 nfp_cppcore_pcie_unit(pf->cpp));
 
-       sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
+       sym = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
        if (!sym) {
                nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
                return (u8 __iomem *)ERR_PTR(-ENOENT);
@@ -713,7 +713,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
 
        /* Verify that the board has completed initialization */
-       if (!nfp_is_ready(pf->cpp)) {
+       if (!nfp_is_ready(pf)) {
                nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
                return -EINVAL;
        }
@@ -813,6 +813,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        nfp_cpp_area_release_free(pf->data_vnic_bar);
 err_unlock:
        mutex_unlock(&pf->lock);
+       cancel_work_sync(&pf->port_refresh_work);
        return err;
 }
 
index 94641b4c2c553c8d13b3403cb43005e3f19c48a3..1a8d04a1e11327f8ec4411c258469bca728ffb32 100644 (file)
@@ -46,7 +46,9 @@
 
 /* Implemented in nfp_hwinfo.c */
 
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
+struct nfp_hwinfo;
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
 
 /* Implemented in nfp_nsp.c, low level functions */
 
index 0a46c0984e6869b385a33ccce2f98047b603a8d3..25a967158ce9cb6706e617d79d642f05630bcc99 100644 (file)
@@ -222,13 +222,6 @@ u32 nfp_cpp_model(struct nfp_cpp *cpp);
 u16 nfp_cpp_interface(struct nfp_cpp *cpp);
 int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
 
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
-void *nfp_rtsym_cache(struct nfp_cpp *cpp);
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
-
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
-
 struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
                                                  u32 cpp_id,
                                                  const char *name,
index 5672d309d07db46fcfdc47f64549591f455cea65..9b69dcf87be934b10d57727626388a2b0b76e958 100644 (file)
@@ -76,10 +76,6 @@ struct nfp_cpp_resource {
  * @serial:            chip serial number
  * @imb_cat_table:     CPP Mapping Table
  *
- * Following fields can be used only in probe() or with rtnl held:
- * @hwinfo:            HWInfo database fetched from the device
- * @rtsym:             firmware run time symbols
- *
  * Following fields use explicit locking:
  * @resource_list:     NFP CPP resource list
  * @resource_lock:     protects @resource_list
@@ -107,9 +103,6 @@ struct nfp_cpp {
 
        struct mutex area_cache_mutex;
        struct list_head area_cache_list;
-
-       void *hwinfo;
-       void *rtsym;
 };
 
 /* Element of the area_cache_list */
@@ -233,9 +226,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
        if (cpp->op->free)
                cpp->op->free(cpp);
 
-       kfree(cpp->hwinfo);
-       kfree(cpp->rtsym);
-
        device_unregister(&cpp->dev);
 
        kfree(cpp);
@@ -276,39 +266,6 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
        return sizeof(cpp->serial);
 }
 
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
-{
-       return cpp->hwinfo;
-}
-
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
-{
-       cpp->hwinfo = val;
-}
-
-void *nfp_rtsym_cache(struct nfp_cpp *cpp)
-{
-       return cpp->rtsym;
-}
-
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
-{
-       cpp->rtsym = val;
-}
-
-/**
- * nfp_nffw_cache_flush() - Flush cached firmware information
- * @cpp:       NFP CPP handle
- *
- * Flush cached firmware information.  This function should be called
- * every time firmware is loaded or unloaded.
- */
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
-{
-       kfree(nfp_rtsym_cache(cpp));
-       nfp_rtsym_cache_set(cpp, NULL);
-}
-
 /**
  * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
  * @cpp:       CPP device handle
index 8d8f311ffa6edb19ecc5af6340bb8191ae199627..4f24aff1e772b16a6fb4af8d0bdf85bbf33e966f 100644 (file)
@@ -178,7 +178,8 @@ hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
        return hwinfo_db_walk(cpp, db, size);
 }
 
-static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+static struct nfp_hwinfo *
+hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
 {
        struct nfp_hwinfo *header;
        struct nfp_resource *res;
@@ -196,7 +197,7 @@ static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
                nfp_resource_release(res);
 
                if (*cpp_size < HWINFO_SIZE_MIN)
-                       return -ENOENT;
+                       return NULL;
        } else if (PTR_ERR(res) == -ENOENT) {
                /* Try getting the HWInfo table from the 'classic' location */
                cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
@@ -204,101 +205,86 @@ static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
                cpp_addr = 0x30000;
                *cpp_size = 0x0e000;
        } else {
-               return PTR_ERR(res);
+               return NULL;
        }
 
        db = kmalloc(*cpp_size + 1, GFP_KERNEL);
        if (!db)
-               return -ENOMEM;
+               return NULL;
 
        err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
-       if (err != *cpp_size) {
-               kfree(db);
-               return err < 0 ? err : -EIO;
-       }
+       if (err != *cpp_size)
+               goto exit_free;
 
        header = (void *)db;
-       if (nfp_hwinfo_is_updating(header)) {
-               kfree(db);
-               return -EBUSY;
-       }
+       if (nfp_hwinfo_is_updating(header))
+               goto exit_free;
 
        if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
                nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
                        le32_to_cpu(header->version));
-               kfree(db);
-               return -EINVAL;
+               goto exit_free;
        }
 
        /* NULL-terminate for safety */
        db[*cpp_size] = '\0';
 
-       nfp_hwinfo_cache_set(cpp, db);
-
-       return 0;
+       return (void *)db;
+exit_free:
+       kfree(db);
+       return NULL;
 }
 
-static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+static struct nfp_hwinfo *hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
 {
        const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
+       struct nfp_hwinfo *db;
        int err;
 
        for (;;) {
                const unsigned long start_time = jiffies;
 
-               err = hwinfo_try_fetch(cpp, hwdb_size);
-               if (!err)
-                       return 0;
+               db = hwinfo_try_fetch(cpp, hwdb_size);
+               if (db)
+                       return db;
 
                err = msleep_interruptible(100);
                if (err || time_after(start_time, wait_until)) {
                        nfp_err(cpp, "NFP access error\n");
-                       return -EIO;
+                       return NULL;
                }
        }
 }
 
-static int nfp_hwinfo_load(struct nfp_cpp *cpp)
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp)
 {
        struct nfp_hwinfo *db;
        size_t hwdb_size = 0;
        int err;
 
-       err = hwinfo_fetch(cpp, &hwdb_size);
-       if (err)
-               return err;
+       db = hwinfo_fetch(cpp, &hwdb_size);
+       if (!db)
+               return NULL;
 
-       db = nfp_hwinfo_cache(cpp);
        err = hwinfo_db_validate(cpp, db, hwdb_size);
        if (err) {
                kfree(db);
-               nfp_hwinfo_cache_set(cpp, NULL);
-               return err;
+               return NULL;
        }
 
-       return 0;
+       return db;
 }
 
 /**
  * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
- * @cpp:       NFP CPP handle
+ * @hwinfo:    NFP HWinfo table
  * @lookup:    HWInfo name to search for
  *
  * Return: Value of the HWInfo name, or NULL
  */
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
 {
        const char *key, *val, *end;
-       struct nfp_hwinfo *hwinfo;
-       int err;
-
-       hwinfo = nfp_hwinfo_cache(cpp);
-       if (!hwinfo) {
-               err = nfp_hwinfo_load(cpp);
-               if (err)
-                       return NULL;
-               hwinfo = nfp_hwinfo_cache(cpp);
-       }
 
        if (!hwinfo || !lookup)
                return NULL;
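
With this change the HWInfo table is no longer cached inside struct nfp_cpp: a caller reads it once with nfp_hwinfo_read(), owns the returned buffer, passes it to nfp_hwinfo_lookup() and frees it with kfree() when done, as nfp_main.c now does with pf->hwinfo. A short, hypothetical usage sketch under those assumptions; example_probe() and the relative include paths are illustrative and assume the code is built alongside the driver sources.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"

static int example_probe(struct nfp_cpp *cpp)
{
        struct nfp_hwinfo *hwinfo;
        const char *partno;

        hwinfo = nfp_hwinfo_read(cpp);          /* caller owns the buffer */
        if (!hwinfo)
                return -ENODEV;

        partno = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
        if (partno)
                pr_info("NFP part number: %s\n", partno);

        kfree(hwinfo);                          /* matches kfree(pf->hwinfo) above */
        return 0;
}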
index 3d15dd03647e8383a981f1c67b1cd62ab893f897..5f193fe2d69e8d98ddbc3006806b318aba6e2874 100644 (file)
@@ -141,6 +141,8 @@ const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
                return NULL;
        }
 
+       mip->name[sizeof(mip->name) - 1] = 0;
+
        return mip;
 }
 
@@ -149,6 +151,11 @@ void nfp_mip_close(const struct nfp_mip *mip)
        kfree(mip);
 }
 
+const char *nfp_mip_name(const struct nfp_mip *mip)
+{
+       return mip->name;
+}
+
 /**
  * nfp_mip_symtab() - Get the address and size of the MIP symbol table
  * @mip:       MIP handle
index 988badd230d1f210496a8b41dab5516e37602d28..d27d29782a1262be31f7e067122ec2f2bcca2f18 100644 (file)
@@ -55,6 +55,7 @@ struct nfp_mip;
 const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
 void nfp_mip_close(const struct nfp_mip *mip);
 
+const char *nfp_mip_name(const struct nfp_mip *mip);
 void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
 void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
 
@@ -87,9 +88,16 @@ struct nfp_rtsym {
        int domain;
 };
 
-int nfp_rtsym_count(struct nfp_cpp *cpp);
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+                     int *error);
 
 #endif /* NFP_NFFW_H */
index eefdb756d74ea1af78302cc9b4d93cee9f84ca94..37364555c42b3fe60438713c4b7ffd1ca3d4a9a6 100644 (file)
@@ -474,13 +474,7 @@ int nfp_nsp_wait(struct nfp_nsp *state)
 
 int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
 {
-       int err;
-
-       err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
-
-       nfp_nffw_cache_flush(state->cpp);
-
-       return err;
+       return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
 }
 
 int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
index 0e3870ecfb8cd69d99fafc03a6a20748dc29de79..203f9cbae0fbc79271f4e6c724a90acb89a20137 100644 (file)
@@ -65,7 +65,8 @@ struct nfp_rtsym_entry {
        __le32  size_lo;
 };
 
-struct nfp_rtsym_cache {
+struct nfp_rtsym_table {
+       struct nfp_cpp *cpp;
        int num;
        char *strtab;
        struct nfp_rtsym symtab[];
@@ -78,7 +79,7 @@ static int nfp_meid(u8 island_id, u8 menum)
 }
 
 static void
-nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, u32 strtab_size,
                        struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
 {
        sw->type = fw->type;
@@ -106,26 +107,36 @@ nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
                sw->domain = -1;
 }
 
-static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+       struct nfp_rtsym_table *rtbl;
+       const struct nfp_mip *mip;
+
+       mip = nfp_mip_open(cpp);
+       rtbl = __nfp_rtsym_table_read(cpp, mip);
+       nfp_mip_close(mip);
+
+       return rtbl;
+}
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
 {
        const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
                NFP_ISL_EMEM0;
        u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
        struct nfp_rtsym_entry *rtsymtab;
-       struct nfp_rtsym_cache *cache;
-       const struct nfp_mip *mip;
+       struct nfp_rtsym_table *cache;
        int err, n, size;
 
-       mip = nfp_mip_open(cpp);
        if (!mip)
-               return -EIO;
+               return NULL;
 
        nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
        nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
-       nfp_mip_close(mip);
 
        if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
-               return -ENXIO;
+               return NULL;
 
        /* Align to 64 bits */
        symtab_size = round_up(symtab_size, 8);
@@ -133,27 +144,26 @@ static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
 
        rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
        if (!rtsymtab)
-               return -ENOMEM;
+               return NULL;
 
        size = sizeof(*cache);
        size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
        size += strtab_size + 1;
        cache = kmalloc(size, GFP_KERNEL);
-       if (!cache) {
-               err = -ENOMEM;
-               goto err_free_rtsym_raw;
-       }
+       if (!cache)
+               goto exit_free_rtsym_raw;
 
+       cache->cpp = cpp;
        cache->num = symtab_size / sizeof(*rtsymtab);
        cache->strtab = (void *)&cache->symtab[cache->num];
 
        err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
        if (err != symtab_size)
-               goto err_free_cache;
+               goto exit_free_cache;
 
        err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
        if (err != strtab_size)
-               goto err_free_cache;
+               goto exit_free_cache;
        cache->strtab[strtab_size] = '\0';
 
        for (n = 0; n < cache->num; n++)
@@ -161,97 +171,71 @@ static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
                                        &cache->symtab[n], &rtsymtab[n]);
 
        kfree(rtsymtab);
-       nfp_rtsym_cache_set(cpp, cache);
-       return 0;
 
-err_free_cache:
+       return cache;
+
+exit_free_cache:
        kfree(cache);
-err_free_rtsym_raw:
+exit_free_rtsym_raw:
        kfree(rtsymtab);
-       return err;
-}
-
-static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
-{
-       struct nfp_rtsym_cache *cache;
-       int err;
-
-       cache = nfp_rtsym_cache(cpp);
-       if (cache)
-               return cache;
-
-       err = nfp_rtsymtab_probe(cpp);
-       if (err < 0)
-               return ERR_PTR(err);
-
-       return nfp_rtsym_cache(cpp);
+       return NULL;
 }
 
 /**
  * nfp_rtsym_count() - Get the number of RTSYM descriptors
- * @cpp:       NFP CPP handle
+ * @rtbl:      NFP RTsym table
  *
- * Return: Number of RTSYM descriptors, or -ERRNO
+ * Return: Number of RTSYM descriptors
  */
-int nfp_rtsym_count(struct nfp_cpp *cpp)
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
 {
-       struct nfp_rtsym_cache *cache;
-
-       cache = nfp_rtsym(cpp);
-       if (IS_ERR(cache))
-               return PTR_ERR(cache);
-
-       return cache->num;
+       if (!rtbl)
+               return -EINVAL;
+       return rtbl->num;
 }
 
 /**
  * nfp_rtsym_get() - Get the Nth RTSYM descriptor
- * @cpp:       NFP CPP handle
+ * @rtbl:      NFP RTsym table
  * @idx:       Index (0-based) of the RTSYM descriptor
  *
  * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
  */
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
 {
-       struct nfp_rtsym_cache *cache;
-
-       cache = nfp_rtsym(cpp);
-       if (IS_ERR(cache))
+       if (!rtbl)
                return NULL;
-
-       if (idx >= cache->num)
+       if (idx >= rtbl->num)
                return NULL;
 
-       return &cache->symtab[idx];
+       return &rtbl->symtab[idx];
 }
 
 /**
  * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
- * @cpp:       NFP CPP handle
+ * @rtbl:      NFP RTsym table
  * @name:      Symbol name
  *
  * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
  */
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
 {
-       struct nfp_rtsym_cache *cache;
        int n;
 
-       cache = nfp_rtsym(cpp);
-       if (IS_ERR(cache))
+       if (!rtbl)
                return NULL;
 
-       for (n = 0; n < cache->num; n++) {
-               if (strcmp(name, cache->symtab[n].name) == 0)
-                       return &cache->symtab[n];
-       }
+       for (n = 0; n < rtbl->num; n++)
+               if (strcmp(name, rtbl->symtab[n].name) == 0)
+                       return &rtbl->symtab[n];
 
        return NULL;
 }
 
 /**
  * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
- * @cpp:       NFP CPP handle
+ * @rtbl:      NFP RTsym table
  * @name:      Symbol name
  * @error:     Pointer to error code (optional)
  *
@@ -261,14 +245,15 @@ const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
  *
  * Return: value read, on error sets the error and returns ~0ULL.
  */
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+                     int *error)
 {
        const struct nfp_rtsym *sym;
        u32 val32, id;
        u64 val;
        int err;
 
-       sym = nfp_rtsym_lookup(cpp, name);
+       sym = nfp_rtsym_lookup(rtbl, name);
        if (!sym) {
                err = -ENOENT;
                goto exit;
@@ -278,14 +263,14 @@ u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
 
        switch (sym->size) {
        case 4:
-               err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
+               err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
                val = val32;
                break;
        case 8:
-               err = nfp_cpp_readq(cpp, id, sym->addr, &val);
+               err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
                break;
        default:
-               nfp_err(cpp,
+               nfp_err(rtbl->cpp,
                        "rtsym '%s' unsupported or non-scalar size: %lld\n",
                        name, sym->size);
                err = -EINVAL;
index d7afc42f766cefee67303dcc7a55a8270b93ebf0..14b08ee9e3ade1db0bf5053de37fa9e76f140e2b 100644 (file)
@@ -552,7 +552,6 @@ struct qed_hwfn {
 #endif
 
        struct z_stream_s               *stream;
-       struct qed_roce_ll2_info        *ll2;
 };
 
 struct pci_params {
index f67ed6d39dfd4043f54f73c4fc97c95be847950d..0e26193156e44ea426ba15f3c9dbbbcd64465931 100644 (file)
@@ -73,7 +73,6 @@ struct qed_cb_ll2_info {
        int rx_cnt;
        u32 rx_size;
        u8 handle;
-       bool frags_mapped;
 
        /* Lock protecting LL2 buffer lists in sleepless context */
        spinlock_t lock;
@@ -89,13 +88,14 @@ struct qed_ll2_buffer {
        dma_addr_t phys_addr;
 };
 
-static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+static void qed_ll2b_complete_tx_packet(void *cxt,
                                        u8 connection_handle,
                                        void *cookie,
                                        dma_addr_t first_frag_addr,
                                        bool b_last_fragment,
                                        bool b_last_packet)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_dev *cdev = p_hwfn->cdev;
        struct sk_buff *skb = cookie;
 
@@ -107,12 +107,6 @@ static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
                cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
                                      b_last_fragment);
 
-       if (cdev->ll2->frags_mapped)
-               /* Case where mapped frags were received, need to
-                * free skb with nr_frags marked as 0
-                */
-               skb_shinfo(skb)->nr_frags = 0;
-
        dev_kfree_skb_any(skb);
 }
 
@@ -164,42 +158,34 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
                qed_ll2_dealloc_buffer(cdev, buffer);
 }
 
-static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
-                                       u8 connection_handle,
-                                       struct qed_ll2_rx_packet *p_pkt,
-                                       struct core_rx_fast_path_cqe *p_cqe,
-                                       bool b_last_packet)
+void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 {
-       u16 packet_length = le16_to_cpu(p_cqe->packet_length);
-       struct qed_ll2_buffer *buffer = p_pkt->cookie;
+       struct qed_hwfn *p_hwfn = cxt;
+       struct qed_ll2_buffer *buffer = data->cookie;
        struct qed_dev *cdev = p_hwfn->cdev;
-       u16 vlan = le16_to_cpu(p_cqe->vlan);
-       u32 opaque_data_0, opaque_data_1;
-       u8 pad = p_cqe->placement_offset;
        dma_addr_t new_phys_addr;
        struct sk_buff *skb;
        bool reuse = false;
        int rc = -EINVAL;
        u8 *new_data;
 
-       opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
-       opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
-
        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
                   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
-                  (u64)p_pkt->rx_buf_addr, pad, packet_length,
-                  le16_to_cpu(p_cqe->parse_flags.flags), vlan,
-                  opaque_data_0, opaque_data_1);
+                  (u64)data->rx_buf_addr,
+                  data->u.placement_offset,
+                  data->length.packet_length,
+                  data->parse_flags,
+                  data->vlan, data->opaque_data_0, data->opaque_data_1);
 
        if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
                print_hex_dump(KERN_INFO, "",
                               DUMP_PREFIX_OFFSET, 16, 1,
-                              buffer->data, packet_length, false);
+                              buffer->data, data->length.packet_length, false);
        }
 
        /* Determine if data is valid */
-       if (packet_length < ETH_HLEN)
+       if (data->length.packet_length < ETH_HLEN)
                reuse = true;
 
        /* Allocate a replacement for buffer; Reuse upon failure */
@@ -219,9 +205,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
                goto out_post;
        }
 
-       pad += NET_SKB_PAD;
-       skb_reserve(skb, pad);
-       skb_put(skb, packet_length);
+       data->u.placement_offset += NET_SKB_PAD;
+       skb_reserve(skb, data->u.placement_offset);
+       skb_put(skb, data->length.packet_length);
        skb_checksum_none_assert(skb);
 
        /* Get partial ethernet information instead of eth_type_trans(),
@@ -232,10 +218,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
 
        /* Pass SKB onward */
        if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
-               if (vlan)
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+               if (data->vlan)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              data->vlan);
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
-                                     opaque_data_0, opaque_data_1);
+                                     data->opaque_data_0,
+                                     data->opaque_data_1);
        }
 
        /* Update Buffer information and update FW producer */
@@ -321,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -333,21 +321,12 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                        b_last_frag =
                                p_tx->cur_completing_bd_idx == p_pkt->bd_used;
                        tx_frag = p_pkt->bds_set[0].tx_frag;
-                       if (p_ll2_conn->conn.gsi_enable)
-                               qed_ll2b_release_tx_gsi_packet(p_hwfn,
-                                                              p_ll2_conn->
-                                                              my_id,
-                                                              p_pkt->cookie,
-                                                              tx_frag,
-                                                              b_last_frag,
-                                                              b_last_packet);
-                       else
-                               qed_ll2b_complete_tx_packet(p_hwfn,
-                                                           p_ll2_conn->my_id,
-                                                           p_pkt->cookie,
-                                                           tx_frag,
-                                                           b_last_frag,
-                                                           b_last_packet);
+                       p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
+                                                     p_ll2_conn->my_id,
+                                                     p_pkt->cookie,
+                                                     tx_frag,
+                                                     b_last_frag,
+                                                     b_last_packet);
                }
        }
 }
@@ -360,7 +339,6 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        struct qed_ll2_tx_packet *p_pkt;
        bool b_last_frag = false;
        unsigned long flags;
-       dma_addr_t tx_frag;
        int rc = -EINVAL;
 
        spin_lock_irqsave(&p_tx->lock, flags);
@@ -401,19 +379,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
 
                spin_unlock_irqrestore(&p_tx->lock, flags);
-               tx_frag = p_pkt->bds_set[0].tx_frag;
-               if (p_ll2_conn->conn.gsi_enable)
-                       qed_ll2b_complete_tx_gsi_packet(p_hwfn,
-                                                       p_ll2_conn->my_id,
-                                                       p_pkt->cookie,
-                                                       tx_frag,
-                                                       b_last_frag, !num_bds);
-               else
-                       qed_ll2b_complete_tx_packet(p_hwfn,
-                                                   p_ll2_conn->my_id,
-                                                   p_pkt->cookie,
-                                                   tx_frag,
-                                                   b_last_frag, !num_bds);
+
+               p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
+                                          p_ll2_conn->my_id,
+                                          p_pkt->cookie,
+                                          p_pkt->bds_set[0].tx_frag,
+                                          b_last_frag, !num_bds);
+
                spin_lock_irqsave(&p_tx->lock, flags);
        }
 
@@ -424,81 +396,71 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        return rc;
 }
 
-static int
-qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
-                          struct qed_ll2_info *p_ll2_info,
-                          union core_rx_cqe_union *p_cqe,
-                          unsigned long lock_flags, bool b_last_cqe)
+static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
+                                 union core_rx_cqe_union *p_cqe,
+                                 struct qed_ll2_comp_rx_data *data)
 {
-       struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
-       struct qed_ll2_rx_packet *p_pkt = NULL;
-       u16 packet_length, parse_flags, vlan;
-       u32 src_mac_addrhi;
-       u16 src_mac_addrlo;
-
-       if (!list_empty(&p_rx->active_descq))
-               p_pkt = list_first_entry(&p_rx->active_descq,
-                                        struct qed_ll2_rx_packet, list_entry);
-       if (!p_pkt) {
-               DP_NOTICE(p_hwfn,
-                         "GSI Rx completion but active_descq is empty\n");
-               return -EIO;
-       }
-
-       list_del(&p_pkt->list_entry);
-       parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
-       packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
-       vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
-       src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
-       src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
-       if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
-               DP_NOTICE(p_hwfn,
-                         "Mismatch between active_descq and the LL2 Rx chain\n");
-       list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
-
-       spin_unlock_irqrestore(&p_rx->lock, lock_flags);
-       qed_ll2b_complete_rx_gsi_packet(p_hwfn,
-                                       p_ll2_info->my_id,
-                                       p_pkt->cookie,
-                                       p_pkt->rx_buf_addr,
-                                       packet_length,
-                                       p_cqe->rx_cqe_gsi.data_length_error,
-                                       parse_flags,
-                                       vlan,
-                                       src_mac_addrhi,
-                                       src_mac_addrlo, b_last_cqe);
-       spin_lock_irqsave(&p_rx->lock, lock_flags);
+       data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+       data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+       data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+       data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+       data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+       data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+}
 
-       return 0;
+static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
+                                 union core_rx_cqe_union *p_cqe,
+                                 struct qed_ll2_comp_rx_data *data)
+{
+       data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
+       data->length.packet_length =
+           le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
+       data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
+       data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
+       data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
+       data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
 }
 
-static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
-                                     struct qed_ll2_info *p_ll2_conn,
-                                     union core_rx_cqe_union *p_cqe,
-                                     unsigned long *p_lock_flags,
-                                     bool b_last_cqe)
+static int
+qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
+                             struct qed_ll2_info *p_ll2_conn,
+                             union core_rx_cqe_union *p_cqe,
+                             unsigned long *p_lock_flags, bool b_last_cqe)
 {
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct qed_ll2_rx_packet *p_pkt = NULL;
+       struct qed_ll2_comp_rx_data data;
 
        if (!list_empty(&p_rx->active_descq))
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
        if (!p_pkt) {
                DP_NOTICE(p_hwfn,
-                         "LL2 Rx completion but active_descq is empty\n");
+                         "[%d] LL2 Rx completion but active_descq is empty\n",
+                         p_ll2_conn->input.conn_type);
+
                return -EIO;
        }
        list_del(&p_pkt->list_entry);
 
+       if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
+               qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
+       else
+               qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
        if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
                DP_NOTICE(p_hwfn,
                          "Mismatch between active_descq and the LL2 Rx chain\n");
+
        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
+       data.connection_handle = p_ll2_conn->my_id;
+       data.cookie = p_pkt->cookie;
+       data.rx_buf_addr = p_pkt->rx_buf_addr;
+       data.b_last_packet = b_last_cqe;
+
        spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
-       qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
-                                   p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+       p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
+
        spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
        return 0;
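
Rx completions are now reported to the connection owner through a single struct qed_ll2_comp_rx_data, dispatched just above via p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data). A rough consumer-side sketch follows, reusing the hypothetical my_priv from the earlier Tx example; my_rx_buf and my_deliver_payload are likewise invented, while the data fields are the ones filled in this hunk.

struct my_rx_buf {                      /* hypothetical per-buffer cookie */
        void *vaddr;
        dma_addr_t phys_addr;
};

static void my_deliver_payload(struct my_priv *priv, void *payload, u16 len);

static void my_rx_handler(void *cxt, struct qed_ll2_comp_rx_data *data)
{
        struct my_priv *priv = cxt;             /* qed_ll2_cbs.cookie */
        struct my_rx_buf *buf = data->cookie;   /* cookie from qed_ll2_post_rx_buffer() */

        pr_debug("LL2 Rx [handle %u] len %u offset %u vlan 0x%04x opaque %08x:%08x\n",
                 data->connection_handle, data->length.packet_length,
                 data->u.placement_offset, data->vlan,
                 data->opaque_data_0, data->opaque_data_1);

        /* Hand the payload up; the buffer is then typically re-posted
         * with qed_ll2_post_rx_buffer().
         */
        my_deliver_payload(priv, buf->vaddr + data->u.placement_offset,
                           data->length.packet_length);
}
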
@@ -506,7 +468,7 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 
 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 {
-       struct qed_ll2_info *p_ll2_conn = cookie;
+       struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        union core_rx_cqe_union *cqe = NULL;
        u16 cq_new_idx = 0, cq_old_idx = 0;
@@ -520,7 +482,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
        while (cq_new_idx != cq_old_idx) {
                bool b_last_cqe = (cq_new_idx == cq_old_idx);
 
-               cqe = qed_chain_consume(&p_rx->rcq_chain);
+               cqe =
+                   (union core_rx_cqe_union *)
+                   qed_chain_consume(&p_rx->rcq_chain);
                cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 
                DP_VERBOSE(p_hwfn,
@@ -534,13 +498,10 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
                        rc = -EINVAL;
                        break;
                case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
-                       rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
-                                                       cqe, flags, b_last_cqe);
-                       break;
                case CORE_RX_CQE_TYPE_REGULAR:
-                       rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-                                                       cqe, &flags,
-                                                       b_last_cqe);
+                       rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
+                                                          cqe, &flags,
+                                                          b_last_cqe);
                        break;
                default:
                        rc = -EIO;
@@ -564,10 +525,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
        p_rx = &p_ll2_conn->rx_queue;
 
        while (!list_empty(&p_rx->active_descq)) {
-               dma_addr_t rx_buf_addr;
-               void *cookie;
-               bool b_last;
-
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
                if (!p_pkt)
@@ -575,22 +532,26 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
                        qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                p_buffer);
                } else {
-                       rx_buf_addr = p_pkt->rx_buf_addr;
-                       cookie = p_pkt->cookie;
+                       dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
+                       void *cookie = p_pkt->cookie;
+                       bool b_last;
 
                        b_last = list_empty(&p_rx->active_descq);
+                       p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
+                                                     p_ll2_conn->my_id,
+                                                     cookie,
+                                                     rx_buf_addr, b_last);
                }
        }
 }
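
Both flush paths above now return descriptors that were never completed through the rx_release_cb / tx_release_cb hooks instead of calling fixed helpers. A release handler is normally plain buffer teardown, in the spirit of the qed_ll2b_release_rx_packet added further down in this patch; the sketch below reuses the same hypothetical my_priv/my_rx_buf types as before, and rx_buf_size is also an assumed field.

static void my_rx_release(void *cxt, u8 connection_handle, void *cookie,
                          dma_addr_t rx_buf_addr, bool b_last_packet)
{
        struct my_priv *priv = cxt;
        struct my_rx_buf *buf = cookie;

        /* The packet never reached the stack; just unmap and free it. */
        dma_unmap_single(priv->dma_dev, rx_buf_addr, priv->rx_buf_size,
                         DMA_FROM_DEVICE);
        kfree(buf->vaddr);
        kfree(buf);
}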
 
-#if IS_ENABLED(CONFIG_QED_ISCSI)
 static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
 {
        u8 bd_flags = 0;
@@ -740,12 +701,13 @@ static void
 qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                          struct qed_ll2_info *p_ll2_conn)
 {
+       struct qed_ll2_tx_pkt_info tx_pkt;
        struct qed_ooo_buffer *p_buffer;
-       int rc;
        u16 l4_hdr_offset_w;
        dma_addr_t first_frag;
        u16 parse_flags;
        u8 bd_flags;
+       int rc;
 
        /* Submit Tx buffers here */
        while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
@@ -760,13 +722,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
                SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
-               rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
-                                              p_buffer->vlan, bd_flags,
-                                              l4_hdr_offset_w,
-                                              p_ll2_conn->conn.tx_dest, 0,
-                                              first_frag,
-                                              p_buffer->packet_length,
-                                              p_buffer, true);
+               memset(&tx_pkt, 0, sizeof(tx_pkt));
+               tx_pkt.num_of_bds = 1;
+               tx_pkt.vlan = p_buffer->vlan;
+               tx_pkt.bd_flags = bd_flags;
+               tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
+               tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+               tx_pkt.first_frag = first_frag;
+               tx_pkt.first_frag_len = p_buffer->packet_length;
+               tx_pkt.cookie = p_buffer;
+
+               rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
+                                              &tx_pkt, true);
                if (rc) {
                        qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                 p_buffer, false);
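
The conversion above is the pattern used by every Tx submitter in this patch: the long qed_ll2_prepare_tx_packet() argument list becomes a struct qed_ll2_tx_pkt_info filled by the caller. A minimal single-fragment send with the new signature might look like the sketch below; my_ll2_send_one and its parameters are caller-supplied placeholders, and only fields shown in this patch are used.

static int my_ll2_send_one(struct qed_hwfn *p_hwfn, u8 handle,
                           dma_addr_t mapping, u16 len, void *cookie)
{
        struct qed_ll2_tx_pkt_info pkt;

        memset(&pkt, 0, sizeof(pkt));
        pkt.num_of_bds = 1;                     /* single fragment */
        pkt.tx_dest = QED_LL2_TX_DEST_NW;       /* towards the network */
        pkt.first_frag = mapping;
        pkt.first_frag_len = len;
        pkt.cookie = cookie;                    /* echoed back in tx_comp_cb */

        /* 'true' requests an immediate doorbell to the FW */
        return qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
}
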
@@ -873,85 +840,6 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        return 0;
 }
 
-static int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_ll2_info,
-                              u16 rx_num_ooo_buffers, u16 mtu)
-{
-       struct qed_ooo_buffer *p_buf = NULL;
-       void *p_virt;
-       u16 buf_idx;
-       int rc = 0;
-
-       if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
-               return rc;
-
-       if (!rx_num_ooo_buffers)
-               return -EINVAL;
-
-       for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
-               p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
-               if (!p_buf) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
-               p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
-                                        ETH_CACHE_LINE_SIZE - 1) &
-                                       ~(ETH_CACHE_LINE_SIZE - 1);
-               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                           p_buf->rx_buffer_size,
-                                           &p_buf->rx_buffer_phys_addr,
-                                           GFP_KERNEL);
-               if (!p_virt) {
-                       kfree(p_buf);
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               p_buf->rx_buffer_virt_addr = p_virt;
-               qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
-       }
-
-       DP_VERBOSE(p_hwfn, QED_MSG_LL2,
-                  "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
-                  rx_num_ooo_buffers, p_buf->rx_buffer_size);
-
-out:
-       return rc;
-}
-
-static void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
-                                struct qed_ll2_info *p_ll2_conn)
-{
-       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
-               return;
-
-       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
-       qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
-}
-
-static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
-                                          struct qed_ll2_info *p_ll2_conn)
-{
-       struct qed_ooo_buffer *p_buffer;
-
-       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
-               return;
-
-       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
-       while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
-                                                  p_hwfn->p_ooo_info))) {
-               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-                                 p_buffer->rx_buffer_size,
-                                 p_buffer->rx_buffer_virt_addr,
-                                 p_buffer->rx_buffer_phys_addr);
-               kfree(p_buffer);
-       }
-}
-
 static void qed_ll2_stop_ooo(struct qed_dev *cdev)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -965,69 +853,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev)
        *handle = QED_LL2_UNUSED_HANDLE;
 }
 
-static int qed_ll2_start_ooo(struct qed_dev *cdev,
-                            struct qed_ll2_params *params)
-{
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
-       u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-       struct qed_ll2_conn ll2_info = { 0 };
-       int rc;
-
-       ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
-       ll2_info.mtu = params->mtu;
-       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
-       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
-       ll2_info.tx_tc = OOO_LB_TC;
-       ll2_info.tx_dest = CORE_TX_DEST_LB;
-
-       rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
-                                       QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
-                                       handle);
-       if (rc) {
-               DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
-               goto out;
-       }
-
-       rc = qed_ll2_establish_connection(hwfn, *handle);
-       if (rc) {
-               DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
-               goto fail;
-       }
-
-       return 0;
-
-fail:
-       qed_ll2_release_connection(hwfn, *handle);
-out:
-       *handle = QED_LL2_UNUSED_HANDLE;
-       return rc;
-}
-#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
-                                    void *p_cookie) { return -EINVAL; }
-static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
-                                    void *p_cookie) { return -EINVAL; }
-static inline int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_ll2_info,
-                              u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
-static inline void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
-                                struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void
-qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
-static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
-                                   struct qed_ll2_params *params)
-                                   { return -EINVAL; }
-#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
-
 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn,
                                     u8 action_on_error)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct core_rx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1053,16 +883,15 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        p_ramrod->sb_index = p_rx->rx_sb_index;
        p_ramrod->complete_event_flg = 1;
 
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
-       DMA_REGPAIR_LE(p_ramrod->bd_base,
-                      p_rx->rxq_chain.p_phys_addr);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
+       DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
        cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
        p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
-       p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
-       p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
+       p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
+       p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
                                                                          : 1;
@@ -1077,14 +906,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        }
 
        p_ramrod->action_on_error.error_type = action_on_error;
-       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct core_tx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1095,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
-       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                p_ll2_conn->tx_stats_en = 0;
        else
                p_ll2_conn->tx_stats_en = 1;
@@ -1116,7 +945,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 
        p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
        p_ramrod->sb_index = p_tx->tx_sb_index;
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
        p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
        p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -1125,7 +954,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
-       switch (p_ll2_conn->conn.tx_tc) {
+       switch (p_ll2_conn->input.tx_tc) {
        case LB_TC:
                pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
                break;
@@ -1155,7 +984,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
        }
 
-       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -1211,20 +1041,20 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
 
 static int
 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
-                             struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+                             struct qed_ll2_info *p_ll2_info)
 {
        struct qed_ll2_rx_packet *p_descq;
        u32 capacity;
        int rc = 0;
 
-       if (!rx_num_desc)
+       if (!p_ll2_info->input.rx_num_desc)
                goto out;
 
        rc = qed_chain_alloc(p_hwfn->cdev,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_NEXT_PTR,
                             QED_CHAIN_CNT_TYPE_U16,
-                            rx_num_desc,
+                            p_ll2_info->input.rx_num_desc,
                             sizeof(struct core_rx_bd),
                             &p_ll2_info->rx_queue.rxq_chain);
        if (rc) {
@@ -1246,7 +1076,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_PBL,
                             QED_CHAIN_CNT_TYPE_U16,
-                            rx_num_desc,
+                            p_ll2_info->input.rx_num_desc,
                             sizeof(struct core_rx_fast_path_cqe),
                             &p_ll2_info->rx_queue.rcq_chain);
        if (rc) {
@@ -1256,28 +1086,27 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn.conn_type, rx_num_desc);
+                  p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
 
 out:
        return rc;
 }
 
 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
-                                        struct qed_ll2_info *p_ll2_info,
-                                        u16 tx_num_desc)
+                                        struct qed_ll2_info *p_ll2_info)
 {
        struct qed_ll2_tx_packet *p_descq;
        u32 capacity;
        int rc = 0;
 
-       if (!tx_num_desc)
+       if (!p_ll2_info->input.tx_num_desc)
                goto out;
 
        rc = qed_chain_alloc(p_hwfn->cdev,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_PBL,
                             QED_CHAIN_CNT_TYPE_U16,
-                            tx_num_desc,
+                            p_ll2_info->input.tx_num_desc,
                             sizeof(struct core_tx_bd),
                             &p_ll2_info->tx_queue.txq_chain);
        if (rc)
@@ -1294,28 +1123,112 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn.conn_type, tx_num_desc);
+                  p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
 
 out:
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
-                         tx_num_desc);
+                         p_ll2_info->input.tx_num_desc);
+       return rc;
+}
+
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_info *p_ll2_info, u16 mtu)
+{
+       struct qed_ooo_buffer *p_buf = NULL;
+       void *p_virt;
+       u16 buf_idx;
+       int rc = 0;
+
+       if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return rc;
+
+       /* Correct number of requested OOO buffers if needed */
+       if (!p_ll2_info->input.rx_num_ooo_buffers) {
+               u16 num_desc = p_ll2_info->input.rx_num_desc;
+
+               if (!num_desc)
+                       return -EINVAL;
+               p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
+       }
+
+       for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
+            buf_idx++) {
+               p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+               if (!p_buf) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+               p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+                                        ETH_CACHE_LINE_SIZE - 1) &
+                                       ~(ETH_CACHE_LINE_SIZE - 1);
+               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           p_buf->rx_buffer_size,
+                                           &p_buf->rx_buffer_phys_addr,
+                                           GFP_KERNEL);
+               if (!p_virt) {
+                       kfree(p_buf);
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               p_buf->rx_buffer_virt_addr = p_virt;
+               qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+                  "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+                  p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
        return rc;
 }
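
The buffer sizing above uses the usual round-up-to-alignment idiom, (x + a - 1) & ~(a - 1), on mtu + 26 + ETH_CACHE_LINE_SIZE. Shown in isolation and assuming a 64-byte cache line, a 1500-byte MTU gives 1590 bytes, which rounds up to 1600:

/* Same rounding as above; 64 stands in for ETH_CACHE_LINE_SIZE. */
static u32 my_ooo_buf_size(u16 mtu)
{
        u32 size = mtu + 26 + 64;                       /* 1500 -> 1590 */

        return (size + 64 - 1) & ~(u32)(64 - 1);        /* 1590 -> 1600 */
}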
 
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_conn *p_params,
-                              u16 rx_num_desc,
-                              u16 tx_num_desc,
-                              u8 *p_connection_handle)
+static int
+qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
 {
+       if (!cbs || (!cbs->rx_comp_cb ||
+                    !cbs->rx_release_cb ||
+                    !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
+               return -EINVAL;
+
+       p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
+       p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
+       p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
+       p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+       p_ll2_info->cbs.cookie = cbs->cookie;
+
+       return 0;
+}
+
+static enum core_error_handle
+qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
+{
+       switch (err) {
+       case QED_LL2_DROP_PACKET:
+               return LL2_DROP_PACKET;
+       case QED_LL2_DO_NOTHING:
+               return LL2_DO_NOTHING;
+       case QED_LL2_ASSERT:
+               return LL2_ASSERT;
+       default:
+               return LL2_DO_NOTHING;
+       }
+}
+
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
+{
+       struct qed_hwfn *p_hwfn = cxt;
        qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
        struct qed_ll2_info *p_ll2_info = NULL;
+       u8 i, *p_tx_max;
        int rc;
-       u8 i;
 
-       if (!p_connection_handle || !p_hwfn->p_ll2_info)
+       if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
                return -EINVAL;
 
        /* Find a free connection to be used */
@@ -1334,23 +1247,40 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
        if (!p_ll2_info)
                return -EBUSY;
 
-       p_ll2_info->conn = *p_params;
+       memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-       rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+       p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
+                             CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+
+       /* Correct maximum number of Tx BDs */
+       p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
+       if (*p_tx_max == 0)
+               *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
+       else
+               *p_tx_max = min_t(u8, *p_tx_max,
+                                 CORE_LL2_TX_MAX_BDS_PER_PACKET);
+
+       rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Invalid callback functions\n");
+               goto q_allocate_fail;
+       }
+
+       rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
        if (rc)
                goto q_allocate_fail;
 
-       rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+       rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
        if (rc)
                goto q_allocate_fail;
 
        rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
-                                           rx_num_desc * 2, p_params->mtu);
+                                           data->input.mtu);
        if (rc)
                goto q_allocate_fail;
 
        /* Register callbacks for the Rx/Tx queues */
-       if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+       if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                comp_rx_cb = qed_ll2_lb_rxq_completion;
                comp_tx_cb = qed_ll2_lb_txq_completion;
        } else {
@@ -1358,7 +1288,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
                comp_tx_cb = qed_ll2_txq_completion;
        }
 
-       if (rx_num_desc) {
+       if (data->input.rx_num_desc) {
                qed_int_register_cb(p_hwfn, comp_rx_cb,
                                    &p_hwfn->p_ll2_info[i],
                                    &p_ll2_info->rx_queue.rx_sb_index,
@@ -1366,7 +1296,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
                p_ll2_info->rx_queue.b_cb_registred = true;
        }
 
-       if (tx_num_desc) {
+       if (data->input.tx_num_desc) {
                qed_int_register_cb(p_hwfn,
                                    comp_tx_cb,
                                    &p_hwfn->p_ll2_info[i],
@@ -1375,7 +1305,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
                p_ll2_info->tx_queue.b_cb_registred = true;
        }
 
-       *p_connection_handle = i;
+       *data->p_connection_handle = i;
        return rc;
 
 q_allocate_fail:
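
Taken together, acquiring a connection now means filling a struct qed_ll2_acquire_data with the input parameters, a callback table and a pointer for the returned handle, exactly as validated by qed_ll2_set_cbs() above. A hedged end-to-end sketch, reusing the hypothetical handlers from the earlier examples; my_ll2_open and the descriptor counts are invented, while the struct members are the ones used in this patch.

static int my_ll2_open(struct qed_hwfn *p_hwfn, struct my_priv *priv,
                       u16 mtu, u8 *handle)
{
        struct qed_ll2_cbs cbs = {
                .rx_comp_cb     = my_rx_handler,
                .rx_release_cb  = my_rx_release,
                .tx_comp_cb     = my_ll2_tx_done,
                .tx_release_cb  = my_ll2_tx_done,
                .cookie         = priv,
        };
        struct qed_ll2_acquire_data data;
        int rc;

        memset(&data, 0, sizeof(data));
        data.input.conn_type = QED_LL2_TYPE_TEST;       /* assumed for the sketch */
        data.input.mtu = mtu;
        data.input.rx_num_desc = 32;
        data.input.tx_num_desc = 32;
        data.input.tx_dest = QED_LL2_TX_DEST_NW;
        data.p_connection_handle = handle;
        data.cbs = &cbs;

        rc = qed_ll2_acquire_connection(p_hwfn, &data);
        if (rc)
                return rc;

        rc = qed_ll2_establish_connection(p_hwfn, *handle);
        if (rc)
                qed_ll2_release_connection(p_hwfn, *handle);

        return rc;
}
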
@@ -1386,24 +1316,39 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
                                           struct qed_ll2_info *p_ll2_conn)
 {
+       enum qed_ll2_error_handle error_input;
+       enum core_error_handle error_mode;
        u8 action_on_error = 0;
 
        if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
                return 0;
 
        DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
-
-       SET_FIELD(action_on_error,
-                 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
-                 p_ll2_conn->conn.ai_err_packet_too_big);
+       error_input = p_ll2_conn->input.ai_err_packet_too_big;
+       error_mode = qed_ll2_get_error_choice(error_input);
        SET_FIELD(action_on_error,
-                 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
+                 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
+       error_input = p_ll2_conn->input.ai_err_no_buf;
+       error_mode = qed_ll2_get_error_choice(error_input);
+       SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
 
        return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
 }
 
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+                                struct qed_ll2_info *p_ll2_conn)
+{
+       if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return;
+
+       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+       qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_info *p_ll2_conn;
        struct qed_ll2_rx_queue *p_rx;
        struct qed_ll2_tx_queue *p_tx;
@@ -1481,7 +1426,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
 
-       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
                qed_llh_add_protocol_filter(p_hwfn, p_ptt,
                                            0x8906, 0,
                                            QED_LLH_FILTER_ETHERTYPE);
@@ -1530,11 +1475,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
        DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
 }
 
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
                           u8 connection_handle,
                           dma_addr_t addr,
                           u16 buf_len, void *cookie, u8 notify_fw)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct core_rx_bd_with_buff_len *p_curb = NULL;
        struct qed_ll2_rx_packet *p_curp = NULL;
        struct qed_ll2_info *p_ll2_conn;
@@ -1593,20 +1539,18 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
                                          struct qed_ll2_tx_queue *p_tx,
                                          struct qed_ll2_tx_packet *p_curp,
-                                         u8 num_of_bds,
-                                         dma_addr_t first_frag,
-                                         u16 first_frag_len, void *p_cookie,
+                                         struct qed_ll2_tx_pkt_info *pkt,
                                          u8 notify_fw)
 {
        list_del(&p_curp->list_entry);
-       p_curp->cookie = p_cookie;
-       p_curp->bd_used = num_of_bds;
+       p_curp->cookie = pkt->cookie;
+       p_curp->bd_used = pkt->num_of_bds;
        p_curp->notify_fw = notify_fw;
        p_tx->cur_send_packet = p_curp;
        p_tx->cur_send_frag_num = 0;
 
-       p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
-       p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+       p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
+       p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
        p_tx->cur_send_frag_num++;
 }
 
@@ -1614,51 +1558,52 @@ static void
 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                                 struct qed_ll2_info *p_ll2,
                                 struct qed_ll2_tx_packet *p_curp,
-                                u8 num_of_bds,
-                                enum core_tx_dest tx_dest,
-                                u16 vlan,
-                                u8 bd_flags,
-                                u16 l4_hdr_offset_w,
-                                enum core_roce_flavor_type roce_flavor,
-                                dma_addr_t first_frag,
-                                u16 first_frag_len)
+                                struct qed_ll2_tx_pkt_info *pkt)
 {
        struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
        u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
        struct core_tx_bd *start_bd = NULL;
+       enum core_roce_flavor_type roce_flavor;
+       enum core_tx_dest tx_dest;
        u16 bd_data = 0, frag_idx;
 
+       roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
+                                                            : CORE_RROCE;
+
+       tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
+                                                      : CORE_TX_DEST_LB;
+
        start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-       start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+       start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
-                 cpu_to_le16(l4_hdr_offset_w));
+                 cpu_to_le16(pkt->l4_hdr_offset_w));
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-       bd_data |= bd_flags;
+       bd_data |= pkt->bd_flags;
        SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
-       SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
        SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
        start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
-       DMA_REGPAIR_LE(start_bd->addr, first_frag);
-       start_bd->nbytes = cpu_to_le16(first_frag_len);
+       DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
+       start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
 
        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
                   p_ll2->queue_id,
                   p_ll2->cid,
-                  p_ll2->conn.conn_type,
+                  p_ll2->input.conn_type,
                   prod_idx,
-                  first_frag_len,
-                  num_of_bds,
+                  pkt->first_frag_len,
+                  pkt->num_of_bds,
                   le32_to_cpu(start_bd->addr.hi),
                   le32_to_cpu(start_bd->addr.lo));
 
-       if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+       if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
                return;
 
        /* Need to provide the packet with additional BDs for frags */
        for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
-            frag_idx < num_of_bds; frag_idx++) {
+            frag_idx < pkt->num_of_bds; frag_idx++) {
                struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
                *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
@@ -1721,26 +1666,20 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
                   p_ll2_conn->queue_id,
-                  p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
+                  p_ll2_conn->cid,
+                  p_ll2_conn->input.conn_type, db_msg.spq_prod);
 }
 
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
                              u8 connection_handle,
-                             u8 num_of_bds,
-                             u16 vlan,
-                             u8 bd_flags,
-                             u16 l4_hdr_offset_w,
-                             enum qed_ll2_tx_dest e_tx_dest,
-                             enum qed_ll2_roce_flavor_type qed_roce_flavor,
-                             dma_addr_t first_frag,
-                             u16 first_frag_len, void *cookie, u8 notify_fw)
+                             struct qed_ll2_tx_pkt_info *pkt,
+                             bool notify_fw)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_tx_packet *p_curp = NULL;
        struct qed_ll2_info *p_ll2_conn = NULL;
-       enum core_roce_flavor_type roce_flavor;
        struct qed_ll2_tx_queue *p_tx;
        struct qed_chain *p_tx_chain;
-       enum core_tx_dest tx_dest;
        unsigned long flags;
        int rc = 0;
 
@@ -1750,7 +1689,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
        p_tx = &p_ll2_conn->tx_queue;
        p_tx_chain = &p_tx->txq_chain;
 
-       if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+       if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
                return -EIO;
 
        spin_lock_irqsave(&p_tx->lock, flags);
@@ -1763,7 +1702,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
        if (!list_empty(&p_tx->free_descq))
                p_curp = list_first_entry(&p_tx->free_descq,
                                          struct qed_ll2_tx_packet, list_entry);
-       if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+       if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
                p_curp = NULL;
 
        if (!p_curp) {
@@ -1771,26 +1710,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
                goto out;
        }
 
-       tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
-                                                   CORE_TX_DEST_LB;
-       if (qed_roce_flavor == QED_LL2_ROCE) {
-               roce_flavor = CORE_ROCE;
-       } else if (qed_roce_flavor == QED_LL2_RROCE) {
-               roce_flavor = CORE_RROCE;
-       } else {
-               rc = -EINVAL;
-               goto out;
-       }
-
        /* Prepare packet and BD, and perhaps send a doorbell to FW */
-       qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
-                                     num_of_bds, first_frag,
-                                     first_frag_len, cookie, notify_fw);
-       qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
-                                        num_of_bds, tx_dest,
-                                        vlan, bd_flags, l4_hdr_offset_w,
-                                        roce_flavor,
-                                        first_frag, first_frag_len);
+       qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
+
+       qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
 
        qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
 
@@ -1799,11 +1722,12 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
                                      u8 connection_handle,
                                      dma_addr_t addr, u16 nbytes)
 {
        struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_info *p_ll2_conn = NULL;
        u16 cur_send_frag_num = 0;
        struct core_tx_bd *p_bd;
@@ -1838,8 +1762,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_info *p_ll2_conn = NULL;
        int rc = -EINVAL;
        struct qed_ptt *p_ptt;
@@ -1869,10 +1794,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
        }
 
-       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
-       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
                qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
                                               0x8906, 0,
                                               QED_LLH_FILTER_ETHERTYPE);
@@ -1886,8 +1811,28 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
        return rc;
 }
 
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+                                          struct qed_ll2_info *p_ll2_conn)
 {
+       struct qed_ooo_buffer *p_buffer;
+
+       if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return;
+
+       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+       while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+                                                  p_hwfn->p_ooo_info))) {
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_buffer->rx_buffer_size,
+                                 p_buffer->rx_buffer_virt_addr,
+                                 p_buffer->rx_buffer_phys_addr);
+               kfree(p_buffer);
+       }
+}
+
+void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+{
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_info *p_ll2_conn = NULL;
 
        p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
@@ -1957,6 +1902,27 @@ void qed_ll2_free(struct qed_hwfn *p_hwfn)
        p_hwfn->p_ll2_info = NULL;
 }
 
+static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct qed_ll2_stats *p_stats)
+{
+       struct core_ll2_port_stats port_stats;
+
+       memset(&port_stats, 0, sizeof(port_stats));
+       qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+                       BAR0_MAP_REG_TSDM_RAM +
+                       TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
+                       sizeof(port_stats));
+
+       p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+       p_stats->gsi_invalid_pkt_length =
+           HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
+       p_stats->gsi_unsupported_pkt_typ =
+           HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
+       p_stats->gsi_crcchksm_error =
+           HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
+}
+
 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                struct qed_ll2_info *p_ll2_conn,
@@ -2020,9 +1986,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
        p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
 }
 
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
                      u8 connection_handle, struct qed_ll2_stats *p_stats)
 {
+       struct qed_hwfn *p_hwfn = cxt;
        struct qed_ll2_info *p_ll2_conn = NULL;
        struct qed_ptt *p_ptt;
 
@@ -2040,6 +2007,8 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       if (p_ll2_conn->input.gsi_enable)
+               _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
        _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
        _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
        if (p_ll2_conn->tx_stats_en)
@@ -2049,6 +2018,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
        return 0;
 }
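
The statistics entry point keeps its shape but now also folds in the GSI port counters when gsi_enable is set, and like the rest of the API it takes the context as a void *. A small hedged usage sketch; my_ll2_dump_gsi_stats is invented, and the qed_ll2_stats field is one populated in this patch.

static void my_ll2_dump_gsi_stats(struct qed_hwfn *p_hwfn, u8 handle)
{
        struct qed_ll2_stats stats;

        memset(&stats, 0, sizeof(stats));
        if (!qed_ll2_get_stats(p_hwfn, handle, &stats))
                pr_info("LL2 gsi_invalid_hdr: %llu\n",
                        (unsigned long long)stats.gsi_invalid_hdr);
}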
 
+static void qed_ll2b_release_rx_packet(void *cxt,
+                                      u8 connection_handle,
+                                      void *cookie,
+                                      dma_addr_t rx_buf_addr,
+                                      bool b_last_packet)
+{
+       struct qed_hwfn *p_hwfn = cxt;
+
+       qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
+}
+
 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
                                    const struct qed_ll2_cb_ops *ops,
                                    void *cookie)
@@ -2057,21 +2037,86 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
        cdev->ll2->cb_cookie = cookie;
 }
 
+struct qed_ll2_cbs ll2_cbs = {
+       .rx_comp_cb = &qed_ll2b_complete_rx_packet,
+       .rx_release_cb = &qed_ll2b_release_rx_packet,
+       .tx_comp_cb = &qed_ll2b_complete_tx_packet,
+       .tx_release_cb = &qed_ll2b_complete_tx_packet,
+};
+
+static void qed_ll2_set_conn_data(struct qed_dev *cdev,
+                                 struct qed_ll2_acquire_data *data,
+                                 struct qed_ll2_params *params,
+                                 enum qed_ll2_conn_type conn_type,
+                                 u8 *handle, bool lb)
+{
+       memset(data, 0, sizeof(*data));
+
+       data->input.conn_type = conn_type;
+       data->input.mtu = params->mtu;
+       data->input.rx_num_desc = QED_LL2_RX_SIZE;
+       data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+       data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
+       data->input.tx_num_desc = QED_LL2_TX_SIZE;
+       data->p_connection_handle = handle;
+       data->cbs = &ll2_cbs;
+       ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
+
+       if (lb) {
+               data->input.tx_tc = OOO_LB_TC;
+               data->input.tx_dest = QED_LL2_TX_DEST_LB;
+       } else {
+               data->input.tx_tc = 0;
+               data->input.tx_dest = QED_LL2_TX_DEST_NW;
+       }
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+                            struct qed_ll2_params *params)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+       struct qed_ll2_acquire_data data;
+       int rc;
+
+       qed_ll2_set_conn_data(cdev, &data, params,
+                             QED_LL2_TYPE_ISCSI_OOO, handle, true);
+
+       rc = qed_ll2_acquire_connection(hwfn, &data);
+       if (rc) {
+               DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+               goto out;
+       }
+
+       rc = qed_ll2_establish_connection(hwfn, *handle);
+       if (rc) {
+               DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       qed_ll2_release_connection(hwfn, *handle);
+out:
+       *handle = QED_LL2_UNUSED_HANDLE;
+       return rc;
+}
+
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
-       struct qed_ll2_conn ll2_info;
        struct qed_ll2_buffer *buffer, *tmp_buffer;
        enum qed_ll2_conn_type conn_type;
+       struct qed_ll2_acquire_data data;
        struct qed_ptt *p_ptt;
        int rc, i;
-       u8 gsi_enable = 1;
+
 
        /* Initialize LL2 locks & lists */
        INIT_LIST_HEAD(&cdev->ll2->list);
        spin_lock_init(&cdev->ll2->lock);
        cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
                             L1_CACHE_BYTES + params->mtu;
-       cdev->ll2->frags_mapped = params->frags_mapped;
 
        /*Allocate memory for LL2 */
        DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
@@ -2096,11 +2141,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
        switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
        case QED_PCI_FCOE:
                conn_type = QED_LL2_TYPE_FCOE;
-               gsi_enable = 0;
                break;
        case QED_PCI_ISCSI:
                conn_type = QED_LL2_TYPE_ISCSI;
-               gsi_enable = 0;
                break;
        case QED_PCI_ETH_ROCE:
                conn_type = QED_LL2_TYPE_ROCE;
@@ -2109,20 +2152,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
                conn_type = QED_LL2_TYPE_TEST;
        }
 
-       /* Prepare the temporary ll2 information */
-       memset(&ll2_info, 0, sizeof(ll2_info));
+       qed_ll2_set_conn_data(cdev, &data, params, conn_type,
+                             &cdev->ll2->handle, false);
 
-       ll2_info.conn_type = conn_type;
-       ll2_info.mtu = params->mtu;
-       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
-       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
-       ll2_info.tx_tc = 0;
-       ll2_info.tx_dest = CORE_TX_DEST_NW;
-       ll2_info.gsi_enable = gsi_enable;
-
-       rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
-                                       QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
-                                       &cdev->ll2->handle);
+       rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
        if (rc) {
                DP_INFO(cdev, "Failed to acquire LL2 connection\n");
                goto fail;
@@ -2245,6 +2278,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 
 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 {
+       struct qed_ll2_tx_pkt_info pkt;
        const skb_frag_t *frag;
        int rc = -EINVAL, i;
        dma_addr_t mapping;
@@ -2279,32 +2313,30 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
                flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
        }
 
-       rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
-                                      cdev->ll2->handle,
-                                      1 + skb_shinfo(skb)->nr_frags,
-                                      vlan, flags, 0, QED_LL2_TX_DEST_NW,
-                                      0 /* RoCE FLAVOR */,
-                                      mapping, skb->len, skb, 1);
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+       pkt.vlan = vlan;
+       pkt.bd_flags = flags;
+       pkt.tx_dest = QED_LL2_TX_DEST_NW;
+       pkt.first_frag = mapping;
+       pkt.first_frag_len = skb->len;
+       pkt.cookie = skb;
+
+       rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+                                      &pkt, 1);
        if (rc)
                goto err;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
-               if (!cdev->ll2->frags_mapped) {
-                       mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
-                                                  skb_frag_size(frag),
-                                                  DMA_TO_DEVICE);
-
-                       if (unlikely(dma_mapping_error(&cdev->pdev->dev,
-                                                      mapping))) {
-                               DP_NOTICE(cdev,
-                                         "Unable to map frag - dropping packet\n");
-                               rc = -ENOMEM;
-                               goto err;
-                       }
-               } else {
-                       mapping = page_to_phys(skb_frag_page(frag)) |
-                           frag->page_offset;
+
+               mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+                       DP_NOTICE(cdev,
+                                 "Unable to map frag - dropping packet\n");
+                       rc = -ENOMEM;
+                       goto err;
                }
 
                rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
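
The hunk above captures the essence of the TX-side rework: per-packet attributes move out of the long qed_ll2_prepare_tx_packet() argument list and into struct qed_ll2_tx_pkt_info, with extra fragments attached afterwards. A condensed sketch of the calling pattern follows, assuming the caller already holds a valid connection handle and DMA-mapped buffers; p_hwfn, handle, first_map, first_len, frag_map, frag_len and cookie are hypothetical locals.

        struct qed_ll2_tx_pkt_info pkt;
        int rc;

        memset(&pkt, 0, sizeof(pkt));
        pkt.num_of_bds = 2;                    /* header BD plus one fragment */
        pkt.tx_dest = QED_LL2_TX_DEST_NW;      /* send towards the network */
        pkt.first_frag = first_map;            /* DMA address of the header */
        pkt.first_frag_len = first_len;
        pkt.cookie = cookie;                   /* echoed back on TX completion */

        rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
        if (!rc)
                rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
                                                       frag_map, frag_len);
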
index 2c07d0ed971a6de112037085a31a581d11fab77b..a822528e9c63012d689a878233a47f11f00d701a 100644 (file)
 
 #define QED_MAX_NUM_OF_LL2_CONNECTIONS                    (4)
 
-enum qed_ll2_roce_flavor_type {
-       QED_LL2_ROCE,
-       QED_LL2_RROCE,
-       MAX_QED_LL2_ROCE_FLAVOR_TYPE
-};
-
-enum qed_ll2_conn_type {
-       QED_LL2_TYPE_FCOE,
-       QED_LL2_TYPE_ISCSI,
-       QED_LL2_TYPE_TEST,
-       QED_LL2_TYPE_ISCSI_OOO,
-       QED_LL2_TYPE_RESERVED2,
-       QED_LL2_TYPE_ROCE,
-       QED_LL2_TYPE_RESERVED3,
-       MAX_QED_LL2_RX_CONN_TYPE
-};
-
-enum qed_ll2_tx_dest {
-       QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
-       QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
-       QED_LL2_TX_DEST_MAX
-};
-
 struct qed_ll2_rx_packet {
        struct list_head list_entry;
        struct core_rx_bd_with_buff_len *rxq_bd;
@@ -135,30 +112,21 @@ struct qed_ll2_tx_queue {
        bool b_completing_packet;
 };
 
-struct qed_ll2_conn {
-       enum qed_ll2_conn_type conn_type;
-       u16 mtu;
-       u8 rx_drop_ttl0_flg;
-       u8 rx_vlan_removal_en;
-       u8 tx_tc;
-       enum core_tx_dest tx_dest;
-       enum core_error_handle ai_err_packet_too_big;
-       enum core_error_handle ai_err_no_buf;
-       u8 gsi_enable;
-};
-
 struct qed_ll2_info {
        /* Lock protecting the state of LL2 */
        struct mutex mutex;
-       struct qed_ll2_conn conn;
+
+       struct qed_ll2_acquire_data_inputs input;
        u32 cid;
        u8 my_id;
        u8 queue_id;
        u8 tx_stats_id;
        bool b_active;
+       enum core_tx_dest tx_dest;
        u8 tx_stats_en;
        struct qed_ll2_rx_queue rx_queue;
        struct qed_ll2_tx_queue tx_queue;
+       struct qed_ll2_cbs cbs;
 };
 
 /**
@@ -166,38 +134,30 @@ struct qed_ll2_info {
  *        starts rx & tx (if relevant) queues pair. Provides
 *        connection handle as output parameter.
  *
- * @param p_hwfn
- * @param p_params             Contain various configuration properties
- * @param rx_num_desc
- * @param tx_num_desc
- *
- * @param p_connection_handle  Output container for LL2 connection's handle
  *
- * @return 0 on success, failure otherwise
+ * @param cxt - pointer to the hw-function [opaque to some]
+ * @param data - describes connection parameters
+ * @return int
  */
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_conn *p_params,
-                              u16 rx_num_desc,
-                              u16 tx_num_desc,
-                              u8 *p_connection_handle);
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
 
 /**
  * @brief qed_ll2_establish_connection - start previously
  *        allocated LL2 queues pair
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param p_ptt
  * @param connection_handle    LL2 connection's handle obtained from
  *                              qed_ll2_require_connection
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
 
 /**
  * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param connection_handle    LL2 connection's handle obtained from
  *                             qed_ll2_require_connection
  * @param addr                 rx (physical address) buffers to submit
@@ -206,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
                           u8 connection_handle,
                           dma_addr_t addr,
                           u16 buf_len, void *cookie, u8 notify_fw);
@@ -215,53 +175,34 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
  * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
  *                                   to prepare Tx packet submission to FW.
  *
- * @param p_hwfn
- * @param connection_handle    LL2 connection's handle obtained from
- *                             qed_ll2_require_connection
- * @param num_of_bds           a number of requested BD equals a number of
- *                             fragments in Tx packet
- * @param vlan                 VLAN to insert to packet (if insertion set)
- * @param bd_flags
- * @param l4_hdr_offset_w      L4 Header Offset from start of packet
- *                             (in words). This is needed if both l4_csum
- *                             and ipv6_ext are set
- * @param e_tx_dest             indicates if the packet is to be transmitted via
- *                              loopback or to the network
- * @param first_frag
- * @param first_frag_len
- * @param cookie
- *
- * @param notify_fw
+ * @param cxt - pointer to the hw-function [opaque to some]
+ * @param connection_handle
+ * @param pkt - info regarding the tx packet
+ * @param notify_fw - issue doorbell to fw for this packet
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
                              u8 connection_handle,
-                             u8 num_of_bds,
-                             u16 vlan,
-                             u8 bd_flags,
-                             u16 l4_hdr_offset_w,
-                             enum qed_ll2_tx_dest e_tx_dest,
-                             enum qed_ll2_roce_flavor_type qed_roce_flavor,
-                             dma_addr_t first_frag,
-                             u16 first_frag_len, void *cookie, u8 notify_fw);
+                             struct qed_ll2_tx_pkt_info *pkt,
+                             bool notify_fw);
 
 /**
  * @brief qed_ll2_release_connection - releases resources
  *                                     allocated for LL2 connection
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param connection_handle            LL2 connection's handle obtained from
  *                                     qed_ll2_require_connection
  */
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+void qed_ll2_release_connection(void *cxt, u8 connection_handle);
 
 /**
  * @brief qed_ll2_set_fragment_of_tx_packet -  provides fragments to fill
  *                                             Tx BD of BDs requested by
  *                                             qed_ll2_prepare_tx_packet
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param connection_handle                    LL2 connection's handle
  *                                             obtained from
  *                                             qed_ll2_require_connection
@@ -270,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
                                      u8 connection_handle,
                                      dma_addr_t addr, u16 nbytes);
 
@@ -278,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
  * @brief qed_ll2_terminate_connection -       stops Tx/Rx queues
  *
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param connection_handle                    LL2 connection's handle
  *                                             obtained from
  *                                             qed_ll2_require_connection
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
 
 /**
  * @brief qed_ll2_get_stats -  get LL2 queue's statistics
  *
  *
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
  * @param connection_handle    LL2 connection's handle obtained from
  *                             qed_ll2_require_connection
  * @param p_stats
  *
  * @return 0 on success, failure otherwise
  */
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
                      u8 connection_handle, struct qed_ll2_stats *p_stats);
 
 /**
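
Taken together, the header now describes a callback-driven interface: every entry point receives an opaque context instead of a struct qed_hwfn pointer, and the completion/release handlers travel with the connection via struct qed_ll2_cbs inside struct qed_ll2_acquire_data. A minimal acquisition sketch under those assumptions; my_cbs, my_hwfn and my_handle are hypothetical, and the callbacks themselves are left to the caller.

        struct qed_ll2_acquire_data data;
        u8 my_handle;
        int rc;

        memset(&data, 0, sizeof(data));
        data.input.conn_type = QED_LL2_TYPE_ROCE;
        data.input.mtu = 1500;
        data.input.rx_num_desc = QED_LL2_RX_SIZE;  /* sizes as used above */
        data.input.tx_num_desc = QED_LL2_TX_SIZE;
        data.p_connection_handle = &my_handle;
        data.cbs = &my_cbs;        /* struct qed_ll2_cbs, .cookie = my_hwfn */

        rc = qed_ll2_acquire_connection(my_hwfn, &data);
        if (!rc)
                rc = qed_ll2_establish_connection(my_hwfn, my_handle);
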
index b9434b707b0821ac4564a657ef7d1be2f361aae0..4bc2f6c47f69b7826c92628378bb8e3bfa82221e 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
-#include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/io.h>
@@ -65,6 +64,7 @@
 #include "qed_sp.h"
 #include "qed_roce.h"
 #include "qed_ll2.h"
+#include <linux/qed/qed_ll2_if.h>
 
 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
@@ -2709,301 +2709,35 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                    u8 connection_handle,
-                                    void *cookie,
-                                    dma_addr_t first_frag_addr,
-                                    bool b_last_fragment, bool b_last_packet)
-{
-       struct qed_roce_ll2_packet *packet = cookie;
-       struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
-
-       roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
-}
-
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                   u8 connection_handle,
-                                   void *cookie,
-                                   dma_addr_t first_frag_addr,
-                                   bool b_last_fragment, bool b_last_packet)
-{
-       qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
-                                       cookie, first_frag_addr,
-                                       b_last_fragment, b_last_packet);
-}
-
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                    u8 connection_handle,
-                                    void *cookie,
-                                    dma_addr_t rx_buf_addr,
-                                    u16 data_length,
-                                    u8 data_length_error,
-                                    u16 parse_flags,
-                                    u16 vlan,
-                                    u32 src_mac_addr_hi,
-                                    u16 src_mac_addr_lo, bool b_last_packet)
-{
-       struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
-       struct qed_roce_ll2_rx_params params;
-       struct qed_dev *cdev = p_hwfn->cdev;
-       struct qed_roce_ll2_packet pkt;
-
-       DP_VERBOSE(cdev,
-                  QED_MSG_LL2,
-                  "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
-                  (void *)(uintptr_t)rx_buf_addr,
-                  data_length, data_length_error);
-
-       memset(&pkt, 0, sizeof(pkt));
-       pkt.n_seg = 1;
-       pkt.payload[0].baddr = rx_buf_addr;
-       pkt.payload[0].len = data_length;
-
-       memset(&params, 0, sizeof(params));
-       params.vlan_id = vlan;
-       *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
-       *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
-
-       if (data_length_error) {
-               DP_ERR(cdev,
-                      "roce ll2 rx complete: data length error %d, length=%d\n",
-                      data_length_error, data_length);
-               params.rc = -EINVAL;
-       }
-
-       roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
-}
-
 static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
                                       u8 *old_mac_address,
                                       u8 *new_mac_address)
 {
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt;
        int rc = 0;
 
-       if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
-               DP_ERR(cdev,
-                      "qed roce mac filter failed - roce_info/ll2 NULL\n");
-               return -EINVAL;
-       }
-
-       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(cdev,
                       "qed roce ll2 mac filter set: failed to acquire PTT\n");
                return -EINVAL;
        }
 
-       mutex_lock(&hwfn->ll2->lock);
        if (old_mac_address)
-               qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
-                                         old_mac_address);
+               qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
        if (new_mac_address)
-               rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
-                                           new_mac_address);
-       mutex_unlock(&hwfn->ll2->lock);
-
-       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
-
-       if (rc)
-               DP_ERR(cdev,
-                      "qed roce ll2 mac filter set: failed to add mac filter\n");
-
-       return rc;
-}
-
-static int qed_roce_ll2_start(struct qed_dev *cdev,
-                             struct qed_roce_ll2_params *params)
-{
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
-       struct qed_roce_ll2_info *roce_ll2;
-       struct qed_ll2_conn ll2_params;
-       int rc;
-
-       if (!params) {
-               DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
-               return -EINVAL;
-       }
-       if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
-               DP_ERR(cdev,
-                      "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
-                      params->cbs.tx_cb, params->cbs.rx_cb);
-               return -EINVAL;
-       }
-       if (!is_valid_ether_addr(params->mac_address)) {
-               DP_ERR(cdev,
-                      "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
-                      params->mac_address);
-               return -EINVAL;
-       }
-
-       /* Initialize */
-       roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
-       if (!roce_ll2) {
-               DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
-               return -ENOMEM;
-       }
-       roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
-       roce_ll2->cbs = params->cbs;
-       roce_ll2->cb_cookie = params->cb_cookie;
-       mutex_init(&roce_ll2->lock);
-
-       memset(&ll2_params, 0, sizeof(ll2_params));
-       ll2_params.conn_type = QED_LL2_TYPE_ROCE;
-       ll2_params.mtu = params->mtu;
-       ll2_params.rx_drop_ttl0_flg = true;
-       ll2_params.rx_vlan_removal_en = false;
-       ll2_params.tx_dest = CORE_TX_DEST_NW;
-       ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
-       ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
-       ll2_params.gsi_enable = true;
-
-       rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
-                                       params->max_rx_buffers,
-                                       params->max_tx_buffers,
-                                       &roce_ll2->handle);
-       if (rc) {
-               DP_ERR(cdev,
-                      "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
-                      rc);
-               goto err;
-       }
-
-       rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
-                                         roce_ll2->handle);
-       if (rc) {
-               DP_ERR(cdev,
-                      "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
-                      rc);
-               goto err1;
-       }
-
-       hwfn->ll2 = roce_ll2;
-
-       rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
-       if (rc) {
-               hwfn->ll2 = NULL;
-               goto err2;
-       }
-       ether_addr_copy(roce_ll2->mac_address, params->mac_address);
-
-       return 0;
-
-err2:
-       qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err1:
-       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err:
-       kfree(roce_ll2);
-       return rc;
-}
+               rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
 
-static int qed_roce_ll2_stop(struct qed_dev *cdev)
-{
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
-       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
-       int rc;
-
-       if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
-               DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
-               return -EINVAL;
-       }
-
-       /* remove LL2 MAC address filter */
-       rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
-       eth_zero_addr(roce_ll2->mac_address);
+       qed_ptt_release(p_hwfn, p_ptt);
 
-       rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
-                                         roce_ll2->handle);
        if (rc)
                DP_ERR(cdev,
-                      "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
-                      rc);
-
-       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-
-       roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
-
-       kfree(roce_ll2);
+                      "qed roce ll2 mac filter set: failed to add MAC filter\n");
 
        return rc;
 }
 
-static int qed_roce_ll2_tx(struct qed_dev *cdev,
-                          struct qed_roce_ll2_packet *pkt,
-                          struct qed_roce_ll2_tx_params *params)
-{
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
-       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
-       enum qed_ll2_roce_flavor_type qed_roce_flavor;
-       u8 flags = 0;
-       int rc;
-       int i;
-
-       if (!pkt || !params) {
-               DP_ERR(cdev,
-                      "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
-                      cdev, pkt, params);
-               return -EINVAL;
-       }
-
-       qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
-                                                     : QED_LL2_RROCE;
-
-       if (pkt->roce_mode == ROCE_V2_IPV4)
-               flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
-
-       /* Tx header */
-       rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
-                                      1 + pkt->n_seg, 0, flags, 0,
-                                      QED_LL2_TX_DEST_NW,
-                                      qed_roce_flavor, pkt->header.baddr,
-                                      pkt->header.len, pkt, 1);
-       if (rc) {
-               DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
-               return QED_ROCE_TX_HEAD_FAILURE;
-       }
-
-       /* Tx payload */
-       for (i = 0; i < pkt->n_seg; i++) {
-               rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
-                                                      roce_ll2->handle,
-                                                      pkt->payload[i].baddr,
-                                                      pkt->payload[i].len);
-               if (rc) {
-                       /* If failed not much to do here, partial packet has
-                        * been posted * we can't free memory, will need to wait
-                        * for completion
-                        */
-                       DP_ERR(cdev,
-                              "roce ll2 tx: payload failed (rc=%d)\n", rc);
-                       return QED_ROCE_TX_FRAG_FAILURE;
-               }
-       }
-
-       return 0;
-}
-
-static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
-                                      struct qed_roce_ll2_buffer *buf,
-                                      u64 cookie, u8 notify_fw)
-{
-       return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
-                                     QED_LEADING_HWFN(cdev)->ll2->handle,
-                                     buf->baddr, buf->len,
-                                     (void *)(uintptr_t)cookie, notify_fw);
-}
-
-static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
-{
-       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
-       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
-
-       return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
-                                roce_ll2->handle, stats);
-}
-
 static const struct qed_rdma_ops qed_rdma_ops_pass = {
        .common = &qed_common_ops_pass,
        .fill_dev_info = &qed_fill_rdma_dev_info,
@@ -3031,12 +2765,15 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
        .rdma_free_tid = &qed_rdma_free_tid,
        .rdma_register_tid = &qed_rdma_register_tid,
        .rdma_deregister_tid = &qed_rdma_deregister_tid,
-       .roce_ll2_start = &qed_roce_ll2_start,
-       .roce_ll2_stop = &qed_roce_ll2_stop,
-       .roce_ll2_tx = &qed_roce_ll2_tx,
-       .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
-       .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
-       .roce_ll2_stats = &qed_roce_ll2_stats,
+       .ll2_acquire_connection = &qed_ll2_acquire_connection,
+       .ll2_establish_connection = &qed_ll2_establish_connection,
+       .ll2_terminate_connection = &qed_ll2_terminate_connection,
+       .ll2_release_connection = &qed_ll2_release_connection,
+       .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
+       .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
+       .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
+       .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+       .ll2_get_stats = &qed_ll2_get_stats,
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops(void)
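
With the roce_ll2_* wrappers removed, upper-layer drivers are expected to reach LL2 directly through the exported ops table above. A hedged sketch of that call path; rdma_cxt stands for the opaque hw-function context the upper driver already holds, and handle and stats are hypothetical.

        const struct qed_rdma_ops *ops = qed_get_rdma_ops();
        struct qed_ll2_stats stats;
        int rc;

        /* Query statistics, then tear the connection down; error handling
         * is left to the caller.
         */
        rc = ops->ll2_get_stats(rdma_cxt, handle, &stats);
        ops->ll2_terminate_connection(rdma_cxt, handle);
        ops->ll2_release_connection(rdma_cxt, handle);
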
index 9742af51618301c633997a939bb6242f2e83a61c..94be3b5a39c481c821e86ce8210ac3a9fa23099c 100644 (file)
@@ -170,53 +170,10 @@ struct qed_rdma_qp {
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_roce_async_event(struct qed_hwfn *p_hwfn,
                          u8 fw_event_code, union rdma_eqe_data *rdma_data);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                    u8 connection_handle,
-                                    void *cookie,
-                                    dma_addr_t first_frag_addr,
-                                    bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                   u8 connection_handle,
-                                   void *cookie,
-                                   dma_addr_t first_frag_addr,
-                                   bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                    u8 connection_handle,
-                                    void *cookie,
-                                    dma_addr_t rx_buf_addr,
-                                    u16 data_length,
-                                    u8 data_length_error,
-                                    u16 parse_flags,
-                                    u16 vlan,
-                                    u32 src_mac_addr_hi,
-                                    u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
 static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
                                        u8 fw_event_code,
                                        union rdma_eqe_data *rdma_data) {}
-static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                                  u8 connection_handle,
-                                                  void *cookie,
-                                                  dma_addr_t first_frag_addr,
-                                                  bool b_last_fragment,
-                                                  bool b_last_packet) {}
-static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                                 u8 connection_handle,
-                                                 void *cookie,
-                                                 dma_addr_t first_frag_addr,
-                                                 bool b_last_fragment,
-                                                 bool b_last_packet) {}
-static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
-                                                  u8 connection_handle,
-                                                  void *cookie,
-                                                  dma_addr_t rx_buf_addr,
-                                                  u16 data_length,
-                                                  u8 data_length_error,
-                                                  u16 parse_flags,
-                                                  u16 vlan,
-                                                  u32 src_mac_addr_hi,
-                                                  u16 src_mac_addr_lo,
-                                                  bool b_last_packet) {}
 #endif
 #endif
index b65bbc54a097937b0539eb8331c25edb5a0577d9..34d9b882a7807a63f6fde24ce75922f50606b7a8 100644 (file)
@@ -1105,6 +1105,11 @@ static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
        return 0;
 }
 
+static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
+                                     struct qed_sb_info *p_sb)
+{
+}
+
 static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
                                        u8 vport_id,
                                        u16 mtu,
index ee9675db5bf953b92cf46ead04e19f744ba36204..748fb12260a629d15c7018f73aa16c356eef0da6 100644 (file)
@@ -105,32 +105,27 @@ struct rocker_world_ops {
        int (*port_open)(struct rocker_port *rocker_port);
        void (*port_stop)(struct rocker_port *rocker_port);
        int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
-                                      u8 state,
-                                      struct switchdev_trans *trans);
+                                      u8 state);
        int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
                                          unsigned long brport_flags,
                                          struct switchdev_trans *trans);
        int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port,
                                          unsigned long *p_brport_flags);
+       int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
+                                                 rocker_port,
+                                                 unsigned long *
+                                                 p_brport_flags);
        int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
                                                u32 ageing_time,
                                                struct switchdev_trans *trans);
        int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
-                                const struct switchdev_obj_port_vlan *vlan,
-                                struct switchdev_trans *trans);
+                                const struct switchdev_obj_port_vlan *vlan);
        int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
                                 const struct switchdev_obj_port_vlan *vlan);
-       int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
-                                 struct switchdev_obj_port_vlan *vlan,
-                                 switchdev_obj_dump_cb_t *cb);
        int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
-                               const struct switchdev_obj_port_fdb *fdb,
-                               struct switchdev_trans *trans);
+                               u16 vid, const unsigned char *addr);
        int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
-                               const struct switchdev_obj_port_fdb *fdb);
-       int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port,
-                                struct switchdev_obj_port_fdb *fdb,
-                                switchdev_obj_dump_cb_t *cb);
+                               u16 vid, const unsigned char *addr);
        int (*port_master_linked)(struct rocker_port *rocker_port,
                                  struct net_device *master);
        int (*port_master_unlinked)(struct rocker_port *rocker_port,
index bab13613b138cc15c734d9e9fff5f465ef480a44..b1e5c07099fa1f607e2092d4c60ff31305ce02c1 100644 (file)
@@ -1557,7 +1557,11 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
 
        if (!wops->port_attr_stp_state_set)
                return -EOPNOTSUPP;
-       return wops->port_attr_stp_state_set(rocker_port, state, trans);
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return wops->port_attr_stp_state_set(rocker_port, state);
 }
 
 static int
@@ -1569,6 +1573,10 @@ rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
 
        if (!wops->port_attr_bridge_flags_set)
                return -EOPNOTSUPP;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
        return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
                                                trans);
 }
@@ -1584,6 +1592,20 @@ rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
        return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
 }
 
+static int
+rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
+                                               rocker_port,
+                                               unsigned long *
+                                               p_brport_flags_support)
+{
+       struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+       if (!wops->port_attr_bridge_flags_support_get)
+               return -EOPNOTSUPP;
+       return wops->port_attr_bridge_flags_support_get(rocker_port,
+                                                       p_brport_flags_support);
+}
+
 static int
 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
                                              u32 ageing_time,
@@ -1594,6 +1616,10 @@ rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
 
        if (!wops->port_attr_bridge_ageing_time_set)
                return -EOPNOTSUPP;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
        return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
                                                      trans);
 }
@@ -1607,7 +1633,11 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
 
        if (!wops->port_obj_vlan_add)
                return -EOPNOTSUPP;
-       return wops->port_obj_vlan_add(rocker_port, vlan, trans);
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return wops->port_obj_vlan_add(rocker_port, vlan);
 }
 
 static int
@@ -1622,50 +1652,26 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
 }
 
 static int
-rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
-                               struct switchdev_obj_port_vlan *vlan,
-                               switchdev_obj_dump_cb_t *cb)
-{
-       struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
-       if (!wops->port_obj_vlan_dump)
-               return -EOPNOTSUPP;
-       return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
-}
-
-static int
-rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
-                             const struct switchdev_obj_port_fdb *fdb,
-                             struct switchdev_trans *trans)
+rocker_world_port_fdb_add(struct rocker_port *rocker_port,
+                         struct switchdev_notifier_fdb_info *info)
 {
        struct rocker_world_ops *wops = rocker_port->rocker->wops;
 
        if (!wops->port_obj_fdb_add)
                return -EOPNOTSUPP;
-       return wops->port_obj_fdb_add(rocker_port, fdb, trans);
-}
-
-static int
-rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
-                             const struct switchdev_obj_port_fdb *fdb)
-{
-       struct rocker_world_ops *wops = rocker_port->rocker->wops;
 
-       if (!wops->port_obj_fdb_del)
-               return -EOPNOTSUPP;
-       return wops->port_obj_fdb_del(rocker_port, fdb);
+       return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
 }
 
 static int
-rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
-                              struct switchdev_obj_port_fdb *fdb,
-                              switchdev_obj_dump_cb_t *cb)
+rocker_world_port_fdb_del(struct rocker_port *rocker_port,
+                         struct switchdev_notifier_fdb_info *info)
 {
        struct rocker_world_ops *wops = rocker_port->rocker->wops;
 
-       if (!wops->port_obj_fdb_dump)
+       if (!wops->port_obj_fdb_del)
                return -EOPNOTSUPP;
-       return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
+       return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
 }
 
 static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
@@ -2022,12 +2028,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
        .ndo_change_mtu                 = rocker_port_change_mtu,
-       .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
-       .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
-       .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
-       .ndo_fdb_add                    = switchdev_port_fdb_add,
-       .ndo_fdb_del                    = switchdev_port_fdb_del,
-       .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
        .ndo_change_proto_down          = rocker_port_change_proto_down,
        .ndo_neigh_destroy              = rocker_port_neigh_destroy,
@@ -2053,6 +2053,10 @@ static int rocker_port_attr_get(struct net_device *dev,
                err = rocker_world_port_attr_bridge_flags_get(rocker_port,
                                                              &attr->u.brport_flags);
                break;
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+               err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
+                                                                     &attr->u.brport_flags_support);
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -2104,11 +2108,6 @@ static int rocker_port_obj_add(struct net_device *dev,
                                                     SWITCHDEV_OBJ_PORT_VLAN(obj),
                                                     trans);
                break;
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = rocker_world_port_obj_fdb_add(rocker_port,
-                                                   SWITCHDEV_OBJ_PORT_FDB(obj),
-                                                   trans);
-               break;
        default:
                err = -EOPNOTSUPP;
                break;
@@ -2128,36 +2127,6 @@ static int rocker_port_obj_del(struct net_device *dev,
                err = rocker_world_port_obj_vlan_del(rocker_port,
                                                     SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = rocker_world_port_obj_fdb_del(rocker_port,
-                                                   SWITCHDEV_OBJ_PORT_FDB(obj));
-               break;
-       default:
-               err = -EOPNOTSUPP;
-               break;
-       }
-
-       return err;
-}
-
-static int rocker_port_obj_dump(struct net_device *dev,
-                               struct switchdev_obj *obj,
-                               switchdev_obj_dump_cb_t *cb)
-{
-       const struct rocker_port *rocker_port = netdev_priv(dev);
-       int err = 0;
-
-       switch (obj->id) {
-       case SWITCHDEV_OBJ_ID_PORT_FDB:
-               err = rocker_world_port_obj_fdb_dump(rocker_port,
-                                                    SWITCHDEV_OBJ_PORT_FDB(obj),
-                                                    cb);
-               break;
-       case SWITCHDEV_OBJ_ID_PORT_VLAN:
-               err = rocker_world_port_obj_vlan_dump(rocker_port,
-                                                     SWITCHDEV_OBJ_PORT_VLAN(obj),
-                                                     cb);
-               break;
        default:
                err = -EOPNOTSUPP;
                break;
@@ -2171,7 +2140,6 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
        .switchdev_port_attr_set        = rocker_port_attr_set,
        .switchdev_port_obj_add         = rocker_port_obj_add,
        .switchdev_port_obj_del         = rocker_port_obj_del,
-       .switchdev_port_obj_dump        = rocker_port_obj_dump,
 };
 
 struct rocker_fib_event_work {
@@ -2729,6 +2697,109 @@ static void rocker_msix_fini(const struct rocker *rocker)
        kfree(rocker->msix_entries);
 }
 
+static bool rocker_port_dev_check(const struct net_device *dev)
+{
+       return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+struct rocker_switchdev_event_work {
+       struct work_struct work;
+       struct switchdev_notifier_fdb_info fdb_info;
+       struct rocker_port *rocker_port;
+       unsigned long event;
+};
+
+static void
+rocker_fdb_offload_notify(struct rocker_port *rocker_port,
+                         struct switchdev_notifier_fdb_info *recv_info)
+{
+       struct switchdev_notifier_fdb_info info;
+
+       info.addr = recv_info->addr;
+       info.vid = recv_info->vid;
+       call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+                                rocker_port->dev, &info.info);
+}
+
+static void rocker_switchdev_event_work(struct work_struct *work)
+{
+       struct rocker_switchdev_event_work *switchdev_work =
+               container_of(work, struct rocker_switchdev_event_work, work);
+       struct rocker_port *rocker_port = switchdev_work->rocker_port;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       int err;
+
+       rtnl_lock();
+       switch (switchdev_work->event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               fdb_info = &switchdev_work->fdb_info;
+               err = rocker_world_port_fdb_add(rocker_port, fdb_info);
+               if (err) {
+                       netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
+                       break;
+               }
+               rocker_fdb_offload_notify(rocker_port, fdb_info);
+               break;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               fdb_info = &switchdev_work->fdb_info;
+               err = rocker_world_port_fdb_del(rocker_port, fdb_info);
+               if (err)
+                       netdev_dbg(rocker_port->dev, "fdb delete failed err=%d\n", err);
+               break;
+       }
+       rtnl_unlock();
+
+       kfree(switchdev_work->fdb_info.addr);
+       kfree(switchdev_work);
+       dev_put(rocker_port->dev);
+}
+
+/* called under rcu_read_lock() */
+static int rocker_switchdev_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       struct rocker_switchdev_event_work *switchdev_work;
+       struct switchdev_notifier_fdb_info *fdb_info = ptr;
+       struct rocker_port *rocker_port;
+
+       if (!rocker_port_dev_check(dev))
+               return NOTIFY_DONE;
+
+       rocker_port = netdev_priv(dev);
+       switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+       if (WARN_ON(!switchdev_work))
+               return NOTIFY_BAD;
+
+       INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
+       switchdev_work->rocker_port = rocker_port;
+       switchdev_work->event = event;
+
+       switch (event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               memcpy(&switchdev_work->fdb_info, ptr,
+                      sizeof(switchdev_work->fdb_info));
+               switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+               ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+                               fdb_info->addr);
+               /* Hold the netdev until the deferred work has run */
+               dev_hold(dev);
+               break;
+       default:
+               kfree(switchdev_work);
+               return NOTIFY_DONE;
+       }
+
+       queue_work(rocker_port->rocker->rocker_owq,
+                  &switchdev_work->work);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_switchdev_notifier = {
+       .notifier_call = rocker_switchdev_event,
+};
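
The notifier above runs in atomic context (under rcu_read_lock()), which is why the FDB entry is copied, the netdev pinned with dev_hold(), and the actual programming deferred to a workqueue that can take rtnl_lock() and sleep. The world-ops implementations therefore only ever see process context. An illustrative stub matching the slimmed rocker_world_ops signature from this patch; example_program_fdb() is hypothetical.

static int example_world_port_obj_fdb_add(struct rocker_port *rocker_port,
                                          u16 vid, const unsigned char *addr)
{
        /* Invoked from rocker_switchdev_event_work() under rtnl_lock(), so
         * sleeping allocations (GFP_KERNEL) and blocking HW access are fine.
         */
        return example_program_fdb(rocker_port, vid, addr);
}
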
+
 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct rocker *rocker;
@@ -2834,6 +2905,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (err)
                goto err_register_fib_notifier;
 
+       err = register_switchdev_notifier(&rocker_switchdev_notifier);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
+               goto err_register_switchdev_notifier;
+       }
+
        rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
 
        err = rocker_probe_ports(rocker);
@@ -2848,6 +2925,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 err_probe_ports:
+       unregister_switchdev_notifier(&rocker_switchdev_notifier);
+err_register_switchdev_notifier:
        unregister_fib_notifier(&rocker->fib_nb);
 err_register_fib_notifier:
        destroy_workqueue(rocker->rocker_owq);
@@ -2878,6 +2957,7 @@ static void rocker_remove(struct pci_dev *pdev)
        struct rocker *rocker = pci_get_drvdata(pdev);
 
        rocker_remove_ports(rocker);
+       unregister_switchdev_notifier(&rocker_switchdev_notifier);
        unregister_fib_notifier(&rocker->fib_nb);
        rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
        destroy_workqueue(rocker->rocker_owq);
@@ -2902,11 +2982,6 @@ static struct pci_driver rocker_pci_driver = {
  * Net device notifier event handler
  ************************************/
 
-static bool rocker_port_dev_check(const struct net_device *dev)
-{
-       return dev->netdev_ops == &rocker_port_netdev_ops;
-}
-
 static bool rocker_port_dev_check_under(const struct net_device *dev,
                                        struct rocker *rocker)
 {
index 2ae85245478087d2d640617bd79bfbfabd5f0763..bd0e3f157e9e8629eaf0ebe591a475eb9e7b5979 100644 (file)
@@ -300,64 +300,6 @@ static bool ofdpa_flags_nowait(int flags)
        return flags & OFDPA_OP_FLAG_NOWAIT;
 }
 
-static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
-                              size_t size)
-{
-       struct switchdev_trans_item *elem = NULL;
-       gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
-                         GFP_ATOMIC : GFP_KERNEL;
-
-       /* If in transaction prepare phase, allocate the memory
-        * and enqueue it on a transaction.  If in transaction
-        * commit phase, dequeue the memory from the transaction
-        * rather than re-allocating the memory.  The idea is the
-        * driver code paths for prepare and commit are identical
-        * so the memory allocated in the prepare phase is the
-        * memory used in the commit phase.
-        */
-
-       if (!trans) {
-               elem = kzalloc(size + sizeof(*elem), gfp_flags);
-       } else if (switchdev_trans_ph_prepare(trans)) {
-               elem = kzalloc(size + sizeof(*elem), gfp_flags);
-               if (!elem)
-                       return NULL;
-               switchdev_trans_item_enqueue(trans, elem, kfree, elem);
-       } else {
-               elem = switchdev_trans_item_dequeue(trans);
-       }
-
-       return elem ? elem + 1 : NULL;
-}
-
-static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
-                          size_t size)
-{
-       return __ofdpa_mem_alloc(trans, flags, size);
-}
-
-static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
-                          size_t n, size_t size)
-{
-       return __ofdpa_mem_alloc(trans, flags, n * size);
-}
-
-static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
-{
-       struct switchdev_trans_item *elem;
-
-       /* Frees are ignored if in transaction prepare phase.  The
-        * memory remains on the per-port list until freed in the
-        * commit phase.
-        */
-
-       if (switchdev_trans_ph_prepare(trans))
-               return;
-
-       elem = (struct switchdev_trans_item *) mem - 1;
-       kfree(elem);
-}
-
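
These helpers existed only to bounce allocations between the switchdev prepare and commit phases; now that the rocker_world_* wrappers return early during the prepare phase (see the rocker_main.c hunks earlier in this diff), ofdpa can allocate and free directly. Where a call site still needs the NOWAIT distinction, one way it could pick the GFP flags is sketched below; "entry" stands for any flow/group table entry type.

        gfp_t gfp = ofdpa_flags_nowait(flags) ? GFP_ATOMIC : GFP_KERNEL;

        entry = kzalloc(sizeof(*entry), gfp);
        if (!entry)
                return -ENOMEM;
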
 /*************************************************************
  * Flow, group, FDB, internal VLAN and neigh command prepares
  *************************************************************/
@@ -815,8 +757,7 @@ ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
 }
 
 static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
-                             struct switchdev_trans *trans, int flags,
-                             struct ofdpa_flow_tbl_entry *match)
+                             int flags, struct ofdpa_flow_tbl_entry *match)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_flow_tbl_entry *found;
@@ -831,9 +772,8 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
 
        if (found) {
                match->cookie = found->cookie;
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_del(&found->entry);
-               ofdpa_kfree(trans, found);
+               hash_del(&found->entry);
+               kfree(found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
@@ -842,22 +782,18 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }
 
-       if (!switchdev_trans_ph_prepare(trans))
-               hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
-
+       hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
 
-       if (!switchdev_trans_ph_prepare(trans))
-               return rocker_cmd_exec(ofdpa_port->rocker_port,
-                                      ofdpa_flags_nowait(flags),
-                                      ofdpa_cmd_flow_tbl_add,
-                                      found, NULL, NULL);
+       return rocker_cmd_exec(ofdpa_port->rocker_port,
+                              ofdpa_flags_nowait(flags),
+                              ofdpa_cmd_flow_tbl_add,
+                              found, NULL, NULL);
-       return 0;
 }
 
 static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
-                             struct switchdev_trans *trans, int flags,
-                             struct ofdpa_flow_tbl_entry *match)
+                             int flags, struct ofdpa_flow_tbl_entry *match)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_flow_tbl_entry *found;
@@ -872,45 +808,41 @@ static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
        found = ofdpa_flow_tbl_find(ofdpa, match);
 
        if (found) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_del(&found->entry);
+               hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }
 
        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
 
-       ofdpa_kfree(trans, match);
+       kfree(match);
 
        if (found) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       err = rocker_cmd_exec(ofdpa_port->rocker_port,
-                                             ofdpa_flags_nowait(flags),
-                                             ofdpa_cmd_flow_tbl_del,
-                                             found, NULL, NULL);
-               ofdpa_kfree(trans, found);
+               err = rocker_cmd_exec(ofdpa_port->rocker_port,
+                                     ofdpa_flags_nowait(flags),
+                                     ofdpa_cmd_flow_tbl_del,
+                                     found, NULL, NULL);
+               kfree(found);
        }
 
        return err;
 }
 
-static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
-                            struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
                             struct ofdpa_flow_tbl_entry *entry)
 {
        if (flags & OFDPA_OP_FLAG_REMOVE)
-               return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
+               return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
        else
-               return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
+               return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
 }
 
-static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
-                                 struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
                                  u32 in_pport, u32 in_pport_mask,
                                  enum rocker_of_dpa_table_id goto_tbl)
 {
        struct ofdpa_flow_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -920,11 +852,11 @@ static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
        entry->key.ig_port.in_pport_mask = in_pport_mask;
        entry->key.ig_port.goto_tbl = goto_tbl;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
-                              struct switchdev_trans *trans, int flags,
+                              int flags,
                               u32 in_pport, __be16 vlan_id,
                               __be16 vlan_id_mask,
                               enum rocker_of_dpa_table_id goto_tbl,
@@ -932,7 +864,7 @@ static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
 {
        struct ofdpa_flow_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -946,11 +878,10 @@ static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
        entry->key.vlan.untagged = untagged;
        entry->key.vlan.new_vlan_id = new_vlan_id;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
-                                  struct switchdev_trans *trans,
                                   u32 in_pport, u32 in_pport_mask,
                                   __be16 eth_type, const u8 *eth_dst,
                                   const u8 *eth_dst_mask, __be16 vlan_id,
@@ -959,7 +890,7 @@ static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
 {
        struct ofdpa_flow_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -983,13 +914,13 @@ static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
        entry->key.term_mac.vlan_id_mask = vlan_id_mask;
        entry->key.term_mac.copy_to_cpu = copy_to_cpu;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
-                                struct switchdev_trans *trans, int flags,
-                                const u8 *eth_dst, const u8 *eth_dst_mask,
-                                __be16 vlan_id, u32 tunnel_id,
+                                int flags, const u8 *eth_dst,
+                                const u8 *eth_dst_mask,  __be16 vlan_id,
+                                u32 tunnel_id,
                                 enum rocker_of_dpa_table_id goto_tbl,
                                 u32 group_id, bool copy_to_cpu)
 {
@@ -999,7 +930,7 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
        bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
        bool wild = false;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;
 
@@ -1037,11 +968,10 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
        entry->key.bridge.group_id = group_id;
        entry->key.bridge.copy_to_cpu = copy_to_cpu;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
-                                        struct switchdev_trans *trans,
                                         __be16 eth_type, __be32 dst,
                                         __be32 dst_mask, u32 priority,
                                         enum rocker_of_dpa_table_id goto_tbl,
@@ -1050,7 +980,7 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
 {
        struct ofdpa_flow_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1065,11 +995,10 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
                                  ucast_routing.group_id);
        entry->fi = fi;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
-static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
-                             struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
                              u32 in_pport, u32 in_pport_mask,
                              const u8 *eth_src, const u8 *eth_src_mask,
                              const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -1081,7 +1010,7 @@ static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
        u32 priority;
        struct ofdpa_flow_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1116,7 +1045,7 @@ static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
        entry->key.acl.ip_tos_mask = ip_tos_mask;
        entry->key.acl.group_id = group_id;
 
-       return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 }
 
 static struct ofdpa_group_tbl_entry *
@@ -1134,22 +1063,20 @@ ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
        return NULL;
 }
 
-static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
-                                      struct ofdpa_group_tbl_entry *entry)
+static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
 {
        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
-               ofdpa_kfree(trans, entry->group_ids);
+               kfree(entry->group_ids);
                break;
        default:
                break;
        }
-       ofdpa_kfree(trans, entry);
+       kfree(entry);
 }
 
-static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
-                              struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
                               struct ofdpa_group_tbl_entry *match)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1161,9 +1088,8 @@ static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
        found = ofdpa_group_tbl_find(ofdpa, match);
 
        if (found) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_del(&found->entry);
-               ofdpa_group_tbl_entry_free(trans, found);
+               hash_del(&found->entry);
+               ofdpa_group_tbl_entry_free(found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
        } else {
@@ -1171,21 +1097,17 @@ static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
        }
 
-       if (!switchdev_trans_ph_prepare(trans))
-               hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
+       hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
 
        spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
 
-       if (!switchdev_trans_ph_prepare(trans))
-               return rocker_cmd_exec(ofdpa_port->rocker_port,
-                                      ofdpa_flags_nowait(flags),
-                                      ofdpa_cmd_group_tbl_add,
-                                      found, NULL, NULL);
-       return 0;
+       return rocker_cmd_exec(ofdpa_port->rocker_port,
+                              ofdpa_flags_nowait(flags),
+                              ofdpa_cmd_group_tbl_add,
+                              found, NULL, NULL);
 }
 
-static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
-                              struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
                               struct ofdpa_group_tbl_entry *match)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1198,97 +1120,90 @@ static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
        found = ofdpa_group_tbl_find(ofdpa, match);
 
        if (found) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_del(&found->entry);
+               hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
        }
 
        spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
 
-       ofdpa_group_tbl_entry_free(trans, match);
+       ofdpa_group_tbl_entry_free(match);
 
        if (found) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       err = rocker_cmd_exec(ofdpa_port->rocker_port,
-                                             ofdpa_flags_nowait(flags),
-                                             ofdpa_cmd_group_tbl_del,
-                                             found, NULL, NULL);
-               ofdpa_group_tbl_entry_free(trans, found);
+               err = rocker_cmd_exec(ofdpa_port->rocker_port,
+                                     ofdpa_flags_nowait(flags),
+                                     ofdpa_cmd_group_tbl_del,
+                                     found, NULL, NULL);
+               ofdpa_group_tbl_entry_free(found);
        }
 
        return err;
 }
 
-static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
-                             struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
                              struct ofdpa_group_tbl_entry *entry)
 {
        if (flags & OFDPA_OP_FLAG_REMOVE)
-               return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
+               return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
        else
-               return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
+               return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
-                                   struct switchdev_trans *trans, int flags,
-                                   __be16 vlan_id, u32 out_pport,
-                                   int pop_vlan)
+                                   int flags, __be16 vlan_id,
+                                   u32 out_pport, int pop_vlan)
 {
        struct ofdpa_group_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
        entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        entry->l2_interface.pop_vlan = pop_vlan;
 
-       return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
 }
 
 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
-                                 struct switchdev_trans *trans,
                                  int flags, u8 group_count,
                                  const u32 *group_ids, u32 group_id)
 {
        struct ofdpa_group_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
        entry->group_id = group_id;
        entry->group_count = group_count;
 
-       entry->group_ids = ofdpa_kcalloc(trans, flags,
-                                        group_count, sizeof(u32));
+       entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
        if (!entry->group_ids) {
-               ofdpa_kfree(trans, entry);
+               kfree(entry);
                return -ENOMEM;
        }
        memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
 
-       return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
 }
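
For reference, kcalloc() takes the element count first, then the element size, and the GFP flags last, which is the order the converted allocation above now uses. A minimal illustration (the helper name is hypothetical, not part of this patch):

static u32 *demo_alloc_group_ids(u8 group_count)
{
        /* kcalloc(n, size, gfp): count, element size, then GFP flags */
        return kcalloc(group_count, sizeof(u32), GFP_KERNEL);
}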
 
 static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
-                               struct switchdev_trans *trans, int flags,
-                               __be16 vlan_id, u8 group_count,
-                               const u32 *group_ids, u32 group_id)
+                               int flags, __be16 vlan_id,
+                               u8 group_count, const u32 *group_ids,
+                               u32 group_id)
 {
-       return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
+       return ofdpa_group_l2_fan_out(ofdpa_port, flags,
                                      group_count, group_ids,
                                      group_id);
 }
 
-static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
-                                 struct switchdev_trans *trans, int flags,
+static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
                                  u32 index, const u8 *src_mac, const u8 *dst_mac,
                                  __be16 vlan_id, bool ttl_check, u32 pport)
 {
        struct ofdpa_group_tbl_entry *entry;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1301,7 +1216,7 @@ static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
        entry->l3_unicast.ttl_check = ttl_check;
        entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
 
-       return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+       return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
 }
 
 static struct ofdpa_neigh_tbl_entry *
@@ -1318,43 +1233,34 @@ ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
 }
 
 static void ofdpa_neigh_add(struct ofdpa *ofdpa,
-                           struct switchdev_trans *trans,
                            struct ofdpa_neigh_tbl_entry *entry)
 {
-       if (!switchdev_trans_ph_commit(trans))
-               entry->index = ofdpa->neigh_tbl_next_index++;
-       if (switchdev_trans_ph_prepare(trans))
-               return;
+       entry->index = ofdpa->neigh_tbl_next_index++;
        entry->ref_count++;
        hash_add(ofdpa->neigh_tbl, &entry->entry,
                 be32_to_cpu(entry->ip_addr));
 }
 
-static void ofdpa_neigh_del(struct switchdev_trans *trans,
-                           struct ofdpa_neigh_tbl_entry *entry)
+static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
 {
-       if (switchdev_trans_ph_prepare(trans))
-               return;
        if (--entry->ref_count == 0) {
                hash_del(&entry->entry);
-               ofdpa_kfree(trans, entry);
+               kfree(entry);
        }
 }
 
 static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
-                              struct switchdev_trans *trans,
                               const u8 *eth_dst, bool ttl_check)
 {
        if (eth_dst) {
                ether_addr_copy(entry->eth_dst, eth_dst);
                entry->ttl_check = ttl_check;
-       } else if (!switchdev_trans_ph_prepare(trans)) {
+       } else {
                entry->ref_count++;
        }
 }
 
 static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
-                                struct switchdev_trans *trans,
                                 int flags, __be32 ip_addr, const u8 *eth_dst)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1371,7 +1277,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
        bool removing;
        int err = 0;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1388,12 +1294,12 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
                entry->dev = ofdpa_port->dev;
                ether_addr_copy(entry->eth_dst, eth_dst);
                entry->ttl_check = true;
-               ofdpa_neigh_add(ofdpa, trans, entry);
+               ofdpa_neigh_add(ofdpa, entry);
        } else if (removing) {
                memcpy(entry, found, sizeof(*entry));
-               ofdpa_neigh_del(trans, found);
+               ofdpa_neigh_del(found);
        } else if (updating) {
-               ofdpa_neigh_update(found, trans, eth_dst, true);
+               ofdpa_neigh_update(found, eth_dst, true);
                memcpy(entry, found, sizeof(*entry));
        } else {
                err = -ENOENT;
@@ -1410,7 +1316,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
         * other routes' nexthops.
         */
 
-       err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
+       err = ofdpa_group_l3_unicast(ofdpa_port, flags,
                                     entry->index,
                                     ofdpa_port->dev->dev_addr,
                                     entry->eth_dst,
@@ -1425,7 +1331,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
 
        if (adding || removing) {
                group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
-               err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
+               err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
                                                    eth_type, ip_addr,
                                                    inet_make_mask(32),
                                                    priority, goto_tbl,
@@ -1438,13 +1344,12 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
 
 err_out:
        if (!adding)
-               ofdpa_kfree(trans, entry);
+               kfree(entry);
 
        return err;
 }
 
 static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
-                                  struct switchdev_trans *trans,
                                   __be32 ip_addr)
 {
        struct net_device *dev = ofdpa_port->dev;
@@ -1463,7 +1368,7 @@ static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
         */
 
        if (n->nud_state & NUD_VALID)
-               err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
+               err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
                                            ip_addr, n->ha);
        else
                neigh_event_send(n, NULL);
@@ -1473,8 +1378,7 @@ static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
 }
 
 static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
-                             struct switchdev_trans *trans, int flags,
-                             __be32 ip_addr, u32 *index)
+                             int flags, __be32 ip_addr, u32 *index)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_neigh_tbl_entry *entry;
@@ -1486,7 +1390,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
        bool resolved = true;
        int err = 0;
 
-       entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1501,14 +1405,14 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
        if (adding) {
                entry->ip_addr = ip_addr;
                entry->dev = ofdpa_port->dev;
-               ofdpa_neigh_add(ofdpa, trans, entry);
+               ofdpa_neigh_add(ofdpa, entry);
                *index = entry->index;
                resolved = false;
        } else if (removing) {
-               ofdpa_neigh_del(trans, found);
+               ofdpa_neigh_del(found);
                *index = found->index;
        } else if (updating) {
-               ofdpa_neigh_update(found, trans, NULL, false);
+               ofdpa_neigh_update(found, NULL, false);
                resolved = !is_zero_ether_addr(found->eth_dst);
                *index = found->index;
        } else {
@@ -1518,7 +1422,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
        spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
 
        if (!adding)
-               ofdpa_kfree(trans, entry);
+               kfree(entry);
 
        if (err)
                return err;
@@ -1526,7 +1430,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
        /* Resolved means neigh ip_addr is resolved to neigh mac. */
 
        if (!resolved)
-               err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
+               err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);
 
        return err;
 }
@@ -1541,7 +1445,6 @@ static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
 }
 
 static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
-                                      struct switchdev_trans *trans,
                                       int flags, __be16 vlan_id)
 {
        struct ofdpa_port *p;
@@ -1553,7 +1456,7 @@ static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
        int err = 0;
        int i;
 
-       group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
+       group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
        if (!group_ids)
                return -ENOMEM;
 
@@ -1578,18 +1481,17 @@ static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
        if (group_count == 0)
                goto no_ports_in_vlan;
 
-       err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
+       err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
                                   group_count, group_ids, group_id);
        if (err)
                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
 
 no_ports_in_vlan:
-       ofdpa_kfree(trans, group_ids);
+       kfree(group_ids);
        return err;
 }
 
-static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
-                                    struct switchdev_trans *trans, int flags,
+static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
                                     __be16 vlan_id, bool pop_vlan)
 {
        const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1608,7 +1510,7 @@ static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
        if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
            ofdpa_port->stp_state == BR_STATE_FORWARDING) {
                out_pport = ofdpa_port->pport;
-               err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+               err = ofdpa_group_l2_interface(ofdpa_port, flags,
                                               vlan_id, out_pport, pop_vlan);
                if (err) {
                        netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -1632,7 +1534,7 @@ static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
                return 0;
 
        out_pport = 0;
-       err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+       err = ofdpa_group_l2_interface(ofdpa_port, flags,
                                       vlan_id, out_pport, pop_vlan);
        if (err) {
                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -1693,8 +1595,7 @@ static struct ofdpa_ctrl {
        },
 };
 
-static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
-                                   struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
                                    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
 {
        u32 in_pport = ofdpa_port->pport;
@@ -1710,7 +1611,7 @@ static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
        u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        int err;
 
-       err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
+       err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
                                 in_pport, in_pport_mask,
                                 eth_src, eth_src_mask,
                                 ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -1727,9 +1628,7 @@ static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
 }
 
 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
-                                      struct switchdev_trans *trans,
-                                      int flags,
-                                      const struct ofdpa_ctrl *ctrl,
+                                      int flags, const struct ofdpa_ctrl *ctrl,
                                       __be16 vlan_id)
 {
        enum rocker_of_dpa_table_id goto_tbl =
@@ -1741,7 +1640,7 @@ static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
        if (!ofdpa_port_is_bridged(ofdpa_port))
                return 0;
 
-       err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
+       err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
                                    ctrl->eth_dst, ctrl->eth_dst_mask,
                                    vlan_id, tunnel_id,
                                    goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -1752,8 +1651,7 @@ static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
        return err;
 }
 
-static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
-                                    struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
                                     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
 {
        u32 in_pport_mask = 0xffffffff;
@@ -1763,8 +1661,7 @@ static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
        if (ntohs(vlan_id) == 0)
                vlan_id = ofdpa_port->internal_vlan_id;
 
-       err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
-                                     ofdpa_port->pport, in_pport_mask,
+       err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
                                      ctrl->eth_type, ctrl->eth_dst,
                                      ctrl->eth_dst_mask, vlan_id,
                                      vlan_id_mask, ctrl->copy_to_cpu,
@@ -1776,26 +1673,24 @@ static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
        return err;
 }
 
-static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
-                               struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
                                const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
 {
        if (ctrl->acl)
-               return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
+               return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
                                                ctrl, vlan_id);
        if (ctrl->bridge)
-               return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
+               return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
                                                   ctrl, vlan_id);
 
        if (ctrl->term)
-               return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
+               return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
                                                 ctrl, vlan_id);
 
        return -EOPNOTSUPP;
 }
 
-static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
-                                   struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
                                    __be16 vlan_id)
 {
        int err = 0;
@@ -1803,7 +1698,7 @@ static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
 
        for (i = 0; i < OFDPA_CTRL_MAX; i++) {
                if (ofdpa_port->ctrls[i]) {
-                       err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+                       err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
                                                   &ofdpa_ctrls[i], vlan_id);
                        if (err)
                                return err;
@@ -1813,8 +1708,7 @@ static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
        return err;
 }
 
-static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
-                          struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
                           const struct ofdpa_ctrl *ctrl)
 {
        u16 vid;
@@ -1823,7 +1717,7 @@ static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, ofdpa_port->vlan_bitmap))
                        continue;
-               err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+               err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
                                           ctrl, htons(vid));
                if (err)
                        break;
@@ -1832,8 +1726,8 @@ static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
        return err;
 }
 
-static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
-                          struct switchdev_trans *trans, int flags, u16 vid)
+static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
+                          u16 vid)
 {
        enum rocker_of_dpa_table_id goto_tbl =
                        ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -1857,43 +1751,44 @@ static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
        change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
 
        if (adding) {
-               err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
+               err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
                                               internal_vlan_id);
                if (err) {
                        netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
-                       goto err_out;
+                       goto err_vlan_add;
                }
        }
 
-       err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
+       err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
                                        internal_vlan_id, untagged);
        if (err) {
                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
-               goto err_out;
+               goto err_vlan_l2_groups;
        }
 
-       err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
+       err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
                                          internal_vlan_id);
        if (err) {
                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
-               goto err_out;
+               goto err_flood_group;
        }
 
-       err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
+       err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
                                  in_pport, vlan_id, vlan_id_mask,
                                  goto_tbl, untagged, internal_vlan_id);
        if (err)
                netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
 
-err_out:
-       if (switchdev_trans_ph_prepare(trans))
-               change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
+       return 0;
 
+err_vlan_add:
+err_vlan_l2_groups:
+err_flood_group:
+       change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
        return err;
 }
 
-static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
-                            struct switchdev_trans *trans, int flags)
+static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
 {
        enum rocker_of_dpa_table_id goto_tbl;
        u32 in_pport;
@@ -1908,7 +1803,7 @@ static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
        in_pport_mask = 0xffff0000;
        goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
 
-       err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
+       err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
                                     in_pport, in_pport_mask,
                                     goto_tbl);
        if (err)
@@ -1920,7 +1815,6 @@ static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
 struct ofdpa_fdb_learn_work {
        struct work_struct work;
        struct ofdpa_port *ofdpa_port;
-       struct switchdev_trans *trans;
        int flags;
        u8 addr[ETH_ALEN];
        u16 vid;
@@ -1939,19 +1833,18 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
 
        rtnl_lock();
        if (learned && removing)
-               call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+               call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
                                         lw->ofdpa_port->dev, &info.info);
        else if (learned && !removing)
-               call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+               call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
                                         lw->ofdpa_port->dev, &info.info);
        rtnl_unlock();
 
-       ofdpa_kfree(lw->trans, work);
+       kfree(work);
 }
 
 static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
-                               struct switchdev_trans *trans, int flags,
-                               const u8 *addr, __be16 vlan_id)
+                               int flags, const u8 *addr, __be16 vlan_id)
 {
        struct ofdpa_fdb_learn_work *lw;
        enum rocker_of_dpa_table_id goto_tbl =
@@ -1959,7 +1852,6 @@ static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
        u32 out_pport = ofdpa_port->pport;
        u32 tunnel_id = 0;
        u32 group_id = ROCKER_GROUP_NONE;
-       bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
        bool copy_to_cpu = false;
        int err;
 
@@ -1967,36 +1859,28 @@ static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
                group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 
        if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
-               err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
+               err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
                                            NULL, vlan_id, tunnel_id, goto_tbl,
                                            group_id, copy_to_cpu);
                if (err)
                        return err;
        }
 
-       if (!syncing)
-               return 0;
-
        if (!ofdpa_port_is_bridged(ofdpa_port))
                return 0;
 
-       lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
+       lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
        if (!lw)
                return -ENOMEM;
 
        INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
 
        lw->ofdpa_port = ofdpa_port;
-       lw->trans = trans;
        lw->flags = flags;
        ether_addr_copy(lw->addr, addr);
        lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
 
-       if (switchdev_trans_ph_prepare(trans))
-               ofdpa_kfree(trans, lw);
-       else
-               schedule_work(&lw->work);
-
+       schedule_work(&lw->work);
        return 0;
 }
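
The learn path above hands the switchdev notification off to process context: the work item is allocated atomically, filled in, scheduled, and freed at the end of the work function. A minimal, self-contained sketch of that pattern with hypothetical names (not code from this patch):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_work {
        struct work_struct work;
        int value;
};

static void demo_work_fn(struct work_struct *work)
{
        struct demo_work *dw = container_of(work, struct demo_work, work);

        /* act on dw->value here, then release the work item */
        kfree(dw);
}

static int demo_defer(int value)
{
        struct demo_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

        if (!dw)
                return -ENOMEM;

        INIT_WORK(&dw->work, demo_work_fn);
        dw->value = value;
        schedule_work(&dw->work);
        return 0;
}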
 
@@ -2014,7 +1898,6 @@ ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
 }
 
 static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
-                         struct switchdev_trans *trans,
                          const unsigned char *addr,
                          __be16 vlan_id, int flags)
 {
@@ -2024,7 +1907,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
        bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
        unsigned long lock_flags;
 
-       fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
+       fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
        if (!fdb)
                return -ENOMEM;
 
@@ -2042,32 +1925,29 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
        if (found) {
                found->touched = jiffies;
                if (removing) {
-                       ofdpa_kfree(trans, fdb);
-                       if (!switchdev_trans_ph_prepare(trans))
-                               hash_del(&found->entry);
+                       kfree(fdb);
+                       hash_del(&found->entry);
                }
        } else if (!removing) {
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_add(ofdpa->fdb_tbl, &fdb->entry,
-                                fdb->key_crc32);
+               hash_add(ofdpa->fdb_tbl, &fdb->entry,
+                        fdb->key_crc32);
        }
 
        spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
 
        /* Check if adding and already exists, or removing and can't find */
        if (!found != !removing) {
-               ofdpa_kfree(trans, fdb);
+               kfree(fdb);
                if (!found && removing)
                        return 0;
                /* Refreshing existing to update aging timers */
                flags |= OFDPA_OP_FLAG_REFRESH;
        }
 
-       return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
+       return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
 }
 
-static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
-                               struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
 {
        struct ofdpa *ofdpa = ofdpa_port->ofdpa;
        struct ofdpa_fdb_tbl_entry *found;
@@ -2089,13 +1969,12 @@ static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
                        continue;
                if (!found->learned)
                        continue;
-               err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
+               err = ofdpa_port_fdb_learn(ofdpa_port, flags,
                                           found->key.addr,
                                           found->key.vlan_id);
                if (err)
                        goto err_out;
-               if (!switchdev_trans_ph_prepare(trans))
-                       hash_del(&found->entry);
+               hash_del(&found->entry);
        }
 
 err_out:
@@ -2125,8 +2004,8 @@ static void ofdpa_fdb_cleanup(unsigned long data)
                ofdpa_port = entry->key.ofdpa_port;
                expires = entry->touched + ofdpa_port->ageing_time;
                if (time_before_eq(expires, jiffies)) {
-                       ofdpa_port_fdb_learn(ofdpa_port, NULL,
-                                            flags, entry->key.addr,
+                       ofdpa_port_fdb_learn(ofdpa_port, flags,
+                                            entry->key.addr,
                                             entry->key.vlan_id);
                        hash_del(&entry->entry);
                } else if (time_before(expires, next_timer)) {
@@ -2140,8 +2019,7 @@ static void ofdpa_fdb_cleanup(unsigned long data)
 }
 
 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
-                                struct switchdev_trans *trans, int flags,
-                                __be16 vlan_id)
+                                int flags, __be16 vlan_id)
 {
        u32 in_pport_mask = 0xffffffff;
        __be16 eth_type;
@@ -2154,26 +2032,25 @@ static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
                vlan_id = ofdpa_port->internal_vlan_id;
 
        eth_type = htons(ETH_P_IP);
-       err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
-                                     ofdpa_port->pport, in_pport_mask,
-                                     eth_type, ofdpa_port->dev->dev_addr,
+       err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+                                     in_pport_mask, eth_type,
+                                     ofdpa_port->dev->dev_addr,
                                      dst_mac_mask, vlan_id, vlan_id_mask,
                                      copy_to_cpu, flags);
        if (err)
                return err;
 
        eth_type = htons(ETH_P_IPV6);
-       err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
-                                     ofdpa_port->pport, in_pport_mask,
-                                     eth_type, ofdpa_port->dev->dev_addr,
+       err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+                                     in_pport_mask, eth_type,
+                                     ofdpa_port->dev->dev_addr,
                                      dst_mac_mask, vlan_id, vlan_id_mask,
                                      copy_to_cpu, flags);
 
        return err;
 }
 
-static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
-                            struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
 {
        bool pop_vlan;
        u32 out_pport;
@@ -2198,7 +2075,7 @@ static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
                        continue;
                vlan_id = htons(vid);
                pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
-               err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+               err = ofdpa_group_l2_interface(ofdpa_port, flags,
                                               vlan_id, out_pport, pop_vlan);
                if (err) {
                        netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -2211,7 +2088,6 @@ static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
 }
 
 static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
-                                struct switchdev_trans *trans,
                                 int flags, u8 state)
 {
        bool want[OFDPA_CTRL_MAX] = { 0, };
@@ -2220,11 +2096,12 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
        int err;
        int i;
 
+       memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
        prev_state = ofdpa_port->stp_state;
-       if (prev_state == state)
+
+       if (ofdpa_port->stp_state == state)
                return 0;
 
-       memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
        ofdpa_port->stp_state = state;
 
        switch (state) {
@@ -2254,26 +2131,29 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
                if (want[i] != ofdpa_port->ctrls[i]) {
                        int ctrl_flags = flags |
                                         (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
-                       err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
+                       err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
                                              &ofdpa_ctrls[i]);
                        if (err)
-                               goto err_out;
+                               goto err_port_ctrl;
                        ofdpa_port->ctrls[i] = want[i];
                }
        }
 
-       err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
+       err = ofdpa_port_fdb_flush(ofdpa_port, flags);
        if (err)
-               goto err_out;
+               goto err_fdb_flush;
 
-       err = ofdpa_port_fwding(ofdpa_port, trans, flags);
+       err = ofdpa_port_fwding(ofdpa_port, flags);
+       if (err)
+               goto err_port_fwding;
 
-err_out:
-       if (switchdev_trans_ph_prepare(trans)) {
-               memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
-               ofdpa_port->stp_state = prev_state;
-       }
+       return 0;
 
+err_port_ctrl:
+err_fdb_flush:
+err_port_fwding:
+       memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+       ofdpa_port->stp_state = prev_state;
        return err;
 }
 
@@ -2284,7 +2164,7 @@ static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
                return 0;
 
        /* port is not bridged, so simulate going to FORWARDING state */
-       return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+       return ofdpa_port_stp_update(ofdpa_port, flags,
                                     BR_STATE_FORWARDING);
 }
 
@@ -2295,25 +2175,24 @@ static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
                return 0;
 
        /* port is not bridged, so simulate going to DISABLED state */
-       return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+       return ofdpa_port_stp_update(ofdpa_port, flags,
                                     BR_STATE_DISABLED);
 }
 
 static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
-                              struct switchdev_trans *trans,
                               u16 vid, u16 flags)
 {
        int err;
 
        /* XXX deal with flags for PVID and untagged */
 
-       err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
+       err = ofdpa_port_vlan(ofdpa_port, 0, vid);
        if (err)
                return err;
 
-       err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
+       err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
        if (err)
-               ofdpa_port_vlan(ofdpa_port, trans,
+               ofdpa_port_vlan(ofdpa_port,
                                OFDPA_OP_FLAG_REMOVE, vid);
 
        return err;
@@ -2324,13 +2203,13 @@ static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
 {
        int err;
 
-       err = ofdpa_port_router_mac(ofdpa_port, NULL,
-                                   OFDPA_OP_FLAG_REMOVE, htons(vid));
+       err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+                                   htons(vid));
        if (err)
                return err;
 
-       return ofdpa_port_vlan(ofdpa_port, NULL,
-                              OFDPA_OP_FLAG_REMOVE, vid);
+       return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+                              vid);
 }
 
 static struct ofdpa_internal_vlan_tbl_entry *
@@ -2389,10 +2268,9 @@ static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
        return found->vlan_id;
 }
 
-static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
-                              struct switchdev_trans *trans, __be32 dst,
-                              int dst_len, struct fib_info *fi,
-                              u32 tb_id, int flags)
+static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
+                              int dst_len, struct fib_info *fi, u32 tb_id,
+                              int flags)
 {
        const struct fib_nh *nh;
        __be16 eth_type = htons(ETH_P_IP);
@@ -2414,7 +2292,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
        has_gw = !!nh->nh_gw;
 
        if (has_gw && nh_on_port) {
-               err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
+               err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
                                         nh->nh_gw, &index);
                if (err)
                        return err;
@@ -2425,7 +2303,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
                group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
        }
 
-       err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
+       err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
                                            dst_mask, priority, goto_tbl,
                                            group_id, fi, flags);
        if (err)
@@ -2550,7 +2428,7 @@ static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
        ofdpa_port->rocker_port = rocker_port;
        ofdpa_port->dev = rocker_port->dev;
        ofdpa_port->pport = rocker_port->pport;
-       ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+       ofdpa_port->brport_flags = BR_LEARNING;
        ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
        return 0;
 }
@@ -2563,7 +2441,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
        rocker_port_set_learning(rocker_port,
                                 !!(ofdpa_port->brport_flags & BR_LEARNING));
 
-       err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
+       err = ofdpa_port_ig_tbl(ofdpa_port, 0);
        if (err) {
                netdev_err(ofdpa_port->dev, "install ig port table failed\n");
                return err;
@@ -2573,7 +2451,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
                ofdpa_port_internal_vlan_id_get(ofdpa_port,
                                                ofdpa_port->dev->ifindex);
 
-       err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+       err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
        if (err) {
                netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
                goto err_untagged_vlan;
@@ -2581,7 +2459,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
        return 0;
 
 err_untagged_vlan:
-       ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+       ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
        return err;
 }
 
@@ -2589,7 +2467,7 @@ static void ofdpa_port_fini(struct rocker_port *rocker_port)
 {
        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
 
-       ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+       ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
 }
 
 static int ofdpa_port_open(struct rocker_port *rocker_port)
@@ -2607,12 +2485,11 @@ static void ofdpa_port_stop(struct rocker_port *rocker_port)
 }
 
 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
-                                        u8 state,
-                                        struct switchdev_trans *trans)
+                                        u8 state)
 {
        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
 
-       return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
+       return ofdpa_port_stp_update(ofdpa_port, 0, state);
 }
 
 static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
@@ -2646,6 +2523,16 @@ ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
        return 0;
 }
 
+static int
+ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
+                                        rocker_port,
+                                        unsigned long *
+                                        p_brport_flags_support)
+{
+       *p_brport_flags_support = BR_LEARNING;
+       return 0;
+}
+
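
The new support getter reports which bridge port flags this world implementation can offload (only BR_LEARNING now that BR_LEARNING_SYNC is gone). A caller-side validation against that mask might look like the sketch below; the helper is hypothetical and not part of this patch:

static int demo_check_bridge_flags(const struct rocker_port *rocker_port,
                                   unsigned long flags)
{
        unsigned long support = 0;
        int err;

        err = ofdpa_port_attr_bridge_flags_support_get(rocker_port, &support);
        if (err)
                return err;

        /* reject any requested flag the world cannot offload */
        return (flags & ~support) ? -EINVAL : 0;
}
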
 static int
 ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
                                       u32 ageing_time,
@@ -2665,15 +2552,14 @@ ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
 }
 
 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
-                                  const struct switchdev_obj_port_vlan *vlan,
-                                  struct switchdev_trans *trans)
+                                  const struct switchdev_obj_port_vlan *vlan)
 {
        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
        u16 vid;
        int err;
 
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-               err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
+               err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
                if (err)
                        return err;
        }
@@ -2697,82 +2583,29 @@ static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
        return 0;
 }
 
-static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
-                                   struct switchdev_obj_port_vlan *vlan,
-                                   switchdev_obj_dump_cb_t *cb)
-{
-       const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-       u16 vid;
-       int err = 0;
-
-       for (vid = 1; vid < VLAN_N_VID; vid++) {
-               if (!test_bit(vid, ofdpa_port->vlan_bitmap))
-                       continue;
-               vlan->flags = 0;
-               if (ofdpa_vlan_id_is_internal(htons(vid)))
-                       vlan->flags |= BRIDGE_VLAN_INFO_PVID;
-               vlan->vid_begin = vlan->vid_end = vid;
-               err = cb(&vlan->obj);
-               if (err)
-                       break;
-       }
-
-       return err;
-}
-
 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
-                                 const struct switchdev_obj_port_fdb *fdb,
-                                 struct switchdev_trans *trans)
+                                 u16 vid, const unsigned char *addr)
 {
        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-       __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+       __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
 
        if (!ofdpa_port_is_bridged(ofdpa_port))
                return -EINVAL;
 
-       return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
+       return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
 }
 
 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
-                                 const struct switchdev_obj_port_fdb *fdb)
+                                 u16 vid, const unsigned char *addr)
 {
        struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-       __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+       __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
        int flags = OFDPA_OP_FLAG_REMOVE;
 
        if (!ofdpa_port_is_bridged(ofdpa_port))
                return -EINVAL;
 
-       return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
-}
-
-static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
-                                  struct switchdev_obj_port_fdb *fdb,
-                                  switchdev_obj_dump_cb_t *cb)
-{
-       const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-       struct ofdpa *ofdpa = ofdpa_port->ofdpa;
-       struct ofdpa_fdb_tbl_entry *found;
-       struct hlist_node *tmp;
-       unsigned long lock_flags;
-       int bkt;
-       int err = 0;
-
-       spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
-       hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
-               if (found->key.ofdpa_port != ofdpa_port)
-                       continue;
-               ether_addr_copy(fdb->addr, found->key.addr);
-               fdb->ndm_state = NUD_REACHABLE;
-               fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
-                                                 found->key.vlan_id);
-               err = cb(&fdb->obj);
-               if (err)
-                       break;
-       }
-       spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
-
-       return err;
+       return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
 }
 
 static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
@@ -2797,7 +2630,7 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
 
        ofdpa_port->bridge_dev = bridge;
 
-       return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+       return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
 }
 
 static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
@@ -2816,7 +2649,7 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
 
        ofdpa_port->bridge_dev = NULL;
 
-       err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+       err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
        if (err)
                return err;
 
@@ -2875,7 +2708,7 @@ static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
                                                    OFDPA_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *) n->primary_key;
 
-       return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+       return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
 }
 
 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
@@ -2885,7 +2718,7 @@ static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
        int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *) n->primary_key;
 
-       return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+       return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
 }
 
 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
@@ -2899,7 +2732,7 @@ static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
            ofdpa_port->stp_state != BR_STATE_FORWARDING)
                return 0;
 
-       return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
+       return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
 }
 
 static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
@@ -2923,7 +2756,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
        ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
        if (!ofdpa_port)
                return 0;
-       err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+       err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
                                  fen_info->dst_len, fen_info->fi,
                                  fen_info->tb_id, 0);
        if (err)
@@ -2944,7 +2777,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
        if (!ofdpa_port)
                return 0;
        fib_info_offload_dec(fen_info->fi);
-       return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+       return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
                                   fen_info->dst_len, fen_info->fi,
                                   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
 }
@@ -2971,7 +2804,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
                if (!ofdpa_port)
                        continue;
                fib_info_offload_dec(flow_entry->fi);
-               ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+               ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
                                   flow_entry);
        }
        spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
@@ -2993,13 +2826,12 @@ struct rocker_world_ops rocker_ofdpa_ops = {
        .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
        .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
        .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
+       .port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
        .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
        .port_obj_vlan_add = ofdpa_port_obj_vlan_add,
        .port_obj_vlan_del = ofdpa_port_obj_vlan_del,
-       .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
        .port_obj_fdb_add = ofdpa_port_obj_fdb_add,
        .port_obj_fdb_del = ofdpa_port_obj_fdb_del,
-       .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
        .port_master_linked = ofdpa_port_master_linked,
        .port_master_unlinked = ofdpa_port_master_unlinked,
        .port_neigh_update = ofdpa_port_neigh_update,
index a0c52e3281024b566dfe4b58e3014f26aadb0eaf..fcea9371ab7f636b885babfdc3ded58a6a941eec 100644 (file)
@@ -32,8 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
-                struct tc_to_netdev *tc);
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+                __be16 proto, struct tc_to_netdev *tc);
 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
 extern bool efx_separate_tx_channels;
index c89456fa148c797757411b34acca918aba2584b7..e5a7a40cc8b68ee092dddc2068760cb1d716383a 100644 (file)
@@ -32,8 +32,8 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
 netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
 void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
-                struct tc_to_netdev *tc);
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+                __be16 proto, struct tc_to_netdev *tc);
 unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
 extern bool ef4_separate_tx_channels;
 
index f6daf09b86272397d35bc72d59c5269b4644db1c..f1520a404ac619851d1f6ed5622828b6c7fbe44c 100644 (file)
@@ -425,8 +425,8 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
                                     efx->n_tx_channels : 0));
 }
 
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
-                struct tc_to_netdev *ntc)
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+                __be16 proto, struct tc_to_netdev *ntc)
 {
        struct ef4_nic *efx = netdev_priv(net_dev);
        struct ef4_channel *channel;
index 3bdf87f310877a31fee219afa3de3dea91e40521..02d41eb4a8e9ae2cdabae7f1400d8506ce0abc85 100644 (file)
@@ -653,8 +653,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
                                     efx->n_tx_channels : 0));
 }
 
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
-                struct tc_to_netdev *ntc)
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+                __be16 proto, struct tc_to_netdev *ntc)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
index 37fc16521143ed8391d81103e621e7e723ab7919..b6a0d92dd6377cd548b3c5e766ebd8a576833888 100644 (file)
@@ -1731,11 +1731,14 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                cpts_rx_enable(cpts, 0);
                break;
        case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_NTP_ALL:
+               return -ERANGE;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_NTP_ALL:
-               return -ERANGE;
+               cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1745,7 +1748,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               cpts_rx_enable(cpts, 1);
+               cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        default:
@@ -1784,7 +1787,7 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
        cfg.tx_type = cpts_is_tx_enabled(cpts) ?
                      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
-                        HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+                        cpts->rx_enable : HWTSTAMP_FILTER_NONE);
 
        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
@@ -2141,6 +2144,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
                (1 << HWTSTAMP_TX_ON);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
        return 0;
 }
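
The reordered switch arms above let cpsw accept the PTP v1 L4 filters (any v1 L4 request is mapped to HWTSTAMP_FILTER_PTP_V1_L4_EVENT) while HWTSTAMP_FILTER_ALL and the NTP filter keep returning -ERANGE. A hedged userspace sketch of requesting that filter through the standard SIOCSHWTSTAMP ioctl; the helper name and interface handling are illustrative, not part of this series.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int request_ptp_v1_rx_timestamps(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* driver may rewrite cfg.rx_filter */
	close(fd);
	return ret;
}

The driver typically writes the filter it actually programmed back into cfg; with this change that can now be the v1 event filter rather than being coerced to HWTSTAMP_FILTER_PTP_V2_EVENT.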
index 7ecc6b70e7e898a5b0bd052cf60423d942905131..e4d6edf387b34571242b720306602528ddf94fe3 100644 (file)
@@ -645,7 +645,7 @@ EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 {
        unsigned long flags;
-       int i, reg;
+       int i;
 
        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
@@ -653,9 +653,6 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
                return -EINVAL;
        }
 
-       reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
-       dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
-
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
index e6222e535019a076ea0d4cd4c902f12332f079b2..9d52c3a78621b27b355c07bff905a6df3ad5c86b 100644 (file)
@@ -1877,8 +1877,8 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
        return 0;
 }
 
-static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-                         struct tc_to_netdev *tc)
+static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                         __be16 proto, struct tc_to_netdev *tc)
 {
        u8 num_tc;
        int i;
index 6ebb0f559a427fdb4d27d9b668b46d7151650043..ff626dbde23fface3e916555abeb76ed4f4ced3a 100644 (file)
@@ -212,6 +212,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
        struct genevehdr *gnvh = geneve_hdr(skb);
        struct metadata_dst *tun_dst = NULL;
        struct pcpu_sw_netstats *stats;
+       unsigned int len;
        int err = 0;
        void *oiph;
 
@@ -225,8 +226,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
                                         vni_to_tunnel_id(gnvh->vni),
                                         gnvh->opt_len * 4);
-               if (!tun_dst)
+               if (!tun_dst) {
+                       geneve->dev->stats.rx_dropped++;
                        goto drop;
+               }
                /* Update tunnel dst according to Geneve options. */
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
                                        gnvh->options, gnvh->opt_len * 4);
@@ -234,8 +237,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                /* Drop packets w/ critical options,
                 * since we don't support any...
                 */
-               if (gnvh->critical)
+               if (gnvh->critical) {
+                       geneve->dev->stats.rx_frame_errors++;
+                       geneve->dev->stats.rx_errors++;
                        goto drop;
+               }
        }
 
        skb_reset_mac_header(skb);
@@ -246,8 +252,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                skb_dst_set(skb, &tun_dst->dst);
 
        /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+               geneve->dev->stats.rx_errors++;
                goto drop;
+       }
 
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
@@ -279,13 +287,15 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                }
        }
 
-       stats = this_cpu_ptr(geneve->dev->tstats);
-       u64_stats_update_begin(&stats->syncp);
-       stats->rx_packets++;
-       stats->rx_bytes += skb->len;
-       u64_stats_update_end(&stats->syncp);
-
-       gro_cells_receive(&geneve->gro_cells, skb);
+       len = skb->len;
+       err = gro_cells_receive(&geneve->gro_cells, skb);
+       if (likely(err == NET_RX_SUCCESS)) {
+               stats = this_cpu_ptr(geneve->dev->tstats);
+               u64_stats_update_begin(&stats->syncp);
+               stats->rx_packets++;
+               stats->rx_bytes += len;
+               u64_stats_update_end(&stats->syncp);
+       }
        return;
 drop:
        /* Consume bad packet */
@@ -334,7 +344,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        struct geneve_sock *gs;
        int opts_len;
 
-       /* Need Geneve and inner Ethernet header to be present */
+       /* Need UDP and Geneve headers to be present */
        if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
                goto drop;
 
@@ -357,8 +367,10 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        opts_len = geneveh->opt_len * 4;
        if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
                                 htons(ETH_P_TEB),
-                                !net_eq(geneve->net, dev_net(geneve->dev))))
+                                !net_eq(geneve->net, dev_net(geneve->dev)))) {
+               geneve->dev->stats.rx_dropped++;
                goto drop;
+       }
 
        geneve_rx(geneve, gs, skb);
        return 0;
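
In the geneve_rx() hunk above the ordering is the point: skb->len is sampled before the buffer is handed to gro_cells_receive(), and the per-CPU counters are only bumped when that call returns NET_RX_SUCCESS, because GRO may consume or drop the skb. A minimal sketch of the same pattern, assuming the usual netdevice/gro_cells headers; rx_account() is an invented name, not a helper in this driver.

static void rx_account(struct net_device *dev, struct gro_cells *cells,
		       struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* read before the skb changes hands */

	if (gro_cells_receive(cells, skb) == NET_RX_SUCCESS) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	/* on any other return value gro_cells_receive() already consumed the skb */
}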
index 262b2ea576a38e4bb7d1c442f2dfcbc2c40fa302..f82d54e0208c32bd31ce9dfd0ddd0f99129c625d 100644 (file)
@@ -763,8 +763,7 @@ struct netvsc_device {
 
        refcount_t sc_offered;
 
-       /* Holds rndis device info */
-       void *extension;
+       struct rndis_device *extension;
 
        int ring_size;
 
index 652453d9fb088fd5103665e12884d87ff08465c3..7c5ed8fe7a4fad77f0f34e40274adf45bc2cb568 100644 (file)
@@ -97,16 +97,6 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
        call_rcu(&nvdev->rcu, free_netvsc_device);
 }
 
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
-       struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
-
-       if (net_device && net_device->destroy)
-               net_device = NULL;
-
-       return net_device;
-}
-
 static void netvsc_destroy_buf(struct hv_device *device)
 {
        struct nvsp_message *revoke_packet;
@@ -243,18 +233,15 @@ static void netvsc_destroy_buf(struct hv_device *device)
        kfree(net_device->send_section_map);
 }
 
-static int netvsc_init_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device,
+                          struct netvsc_device *net_device)
 {
        int ret = 0;
-       struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
        size_t map_words;
        int node;
 
-       net_device = get_outbound_net_device(device);
-       if (!net_device)
-               return -ENODEV;
        ndev = hv_get_drvdata(device);
 
        node = cpu_to_node(device->channel->target_cpu);
@@ -285,9 +272,7 @@ static int netvsc_init_buf(struct hv_device *device)
 
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
-
        memset(init_packet, 0, sizeof(struct nvsp_message));
-
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
        init_packet->msg.v1_msg.send_recv_buf.
                gpadl_handle = net_device->recv_buf_gpadl_handle;
@@ -486,20 +471,15 @@ static int negotiate_nvsp_ver(struct hv_device *device,
        return ret;
 }
 
-static int netvsc_connect_vsp(struct hv_device *device)
+static int netvsc_connect_vsp(struct hv_device *device,
+                             struct netvsc_device *net_device)
 {
-       int ret;
-       struct netvsc_device *net_device;
-       struct nvsp_message *init_packet;
-       int ndis_version;
        const u32 ver_list[] = {
                NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
-               NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
-       int i;
-
-       net_device = get_outbound_net_device(device);
-       if (!net_device)
-               return -ENODEV;
+               NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
+       };
+       struct nvsp_message *init_packet;
+       int ndis_version, i, ret;
 
        init_packet = &net_device->channel_init_pkt;
 
@@ -549,7 +529,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
        net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
 
-       ret = netvsc_init_buf(device);
+       ret = netvsc_init_buf(device, net_device);
 
 cleanup:
        return ret;
@@ -843,7 +823,7 @@ int netvsc_send(struct hv_device *device,
                struct hv_page_buffer **pb,
                struct sk_buff *skb)
 {
-       struct netvsc_device *net_device;
+       struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
        int ret = 0;
        struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
@@ -854,15 +834,15 @@ int netvsc_send(struct hv_device *device,
        bool try_batch;
        bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 
-       net_device = get_outbound_net_device(device);
-       if (!net_device)
+       /* If the device is rescinded, return an error; the packet is dropped. */
+       if (unlikely(net_device->destroy))
                return -ENODEV;
 
        /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
         * here before the negotiation with the host is finished and
         * send_section_map may not be allocated yet.
         */
-       if (!net_device->send_section_map)
+       if (unlikely(!net_device->send_section_map))
                return -EAGAIN;
 
        nvchan = &net_device->chan_table[packet->q_idx];
@@ -1349,7 +1329,7 @@ int netvsc_device_add(struct hv_device *device,
        rcu_assign_pointer(net_device_ctx->nvdev, net_device);
 
        /* Connect with the NetVsp */
-       ret = netvsc_connect_vsp(device);
+       ret = netvsc_connect_vsp(device, net_device);
        if (ret != 0) {
                netdev_err(ndev,
                        "unable to connect to NetVSP - %d\n", ret);
@@ -1368,4 +1348,5 @@ int netvsc_device_add(struct hv_device *device,
        free_netvsc_device(&net_device->rcu);
 
        return ret;
+
 }
index 2564ac83eb64f70f6d7da6c8d666ea8cb3cbed2b..436a3ad55cfd04e6b245135d4c01ce543dd2e133 100644 (file)
@@ -120,7 +120,7 @@ static int netvsc_close(struct net_device *net)
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
-       u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+       u32 aread, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;
 
        netif_tx_disable(net);
@@ -141,15 +141,11 @@ static int netvsc_close(struct net_device *net)
                        if (!chn)
                                continue;
 
-                       hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
-                                                    &awrite);
-
+                       aread = hv_get_bytes_to_read(&chn->inbound);
                        if (aread)
                                break;
 
-                       hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
-                                                    &awrite);
-
+                       aread = hv_get_bytes_to_read(&chn->outbound);
                        if (aread)
                                break;
                }
@@ -345,34 +341,14 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
        return slots_used;
 }
 
-static int count_skb_frag_slots(struct sk_buff *skb)
-{
-       int i, frags = skb_shinfo(skb)->nr_frags;
-       int pages = 0;
-
-       for (i = 0; i < frags; i++) {
-               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-               unsigned long size = skb_frag_size(frag);
-               unsigned long offset = frag->page_offset;
-
-               /* Skip unused frames from start of page */
-               offset &= ~PAGE_MASK;
-               pages += PFN_UP(offset + size);
-       }
-       return pages;
-}
-
-static int netvsc_get_slots(struct sk_buff *skb)
+/* Estimate number of page buffers needed to transmit.
+ * Need at most 2 for the RNDIS header, plus skb body and fragments.
+ */
+static unsigned int netvsc_get_slots(const struct sk_buff *skb)
 {
-       char *data = skb->data;
-       unsigned int offset = offset_in_page(data);
-       unsigned int len = skb_headlen(skb);
-       int slots;
-       int frag_slots;
-
-       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
-       frag_slots = count_skb_frag_slots(skb);
-       return slots + frag_slots;
+       return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
+               + skb_shinfo(skb)->nr_frags
+               + 2;
 }
 
 static u32 net_checksum_info(struct sk_buff *skb)
@@ -410,21 +386,18 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
        struct hv_page_buffer *pb = page_buf;
 
-       /* We will atmost need two pages to describe the rndis
-        * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+       /* We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */
-
-       num_data_pgs = netvsc_get_slots(skb) + 2;
-
+       num_data_pgs = netvsc_get_slots(skb);
        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;
 
                if (skb_linearize(skb))
                        goto no_memory;
 
-               num_data_pgs = netvsc_get_slots(skb) + 2;
+               num_data_pgs = netvsc_get_slots(skb);
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
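
The new netvsc_get_slots() above estimates the page-buffer count as PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) for the linear area, plus one slot per fragment, plus two for the RNDIS header, so callers no longer add the +2 themselves. A small userspace check of that arithmetic on made-up skb geometry; PAGE_SIZE and PFN_UP are re-derived here for a 4 KiB page.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	unsigned long head_offset = 3000;	/* offset_in_page(skb->data) */
	unsigned long head_len    = 2000;	/* skb_headlen(skb) */
	unsigned int  nr_frags    = 3;		/* skb_shinfo(skb)->nr_frags */

	/* linear data straddles two pages; one slot per frag; +2 for RNDIS */
	unsigned int slots = PFN_UP(head_offset + head_len) + nr_frags + 2;

	printf("estimated page buffers: %u\n", slots);	/* 2 + 3 + 2 = 7 */
	return 0;
}

If the estimate exceeds MAX_PAGE_BUFFER_COUNT, the transmit path above linearizes the skb and re-evaluates, exactly as before.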
index 618ed88fad0fc1d4e227f0e84fde74462b2bc496..e4141d62b5c34b0104c7d205c4038230766b6f5e 100644 (file)
@@ -824,6 +824,33 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
        return NOTIFY_OK;
 }
 
+static int ipvlan_addr6_validator_event(struct notifier_block *unused,
+                                       unsigned long event, void *ptr)
+{
+       struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
+       struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
+       struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+       /* FIXME IPv6 autoconf calls us from bh without RTNL */
+       if (in_softirq())
+               return NOTIFY_DONE;
+
+       if (!netif_is_ipvlan(dev))
+               return NOTIFY_DONE;
+
+       if (!ipvlan || !ipvlan->port)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true))
+                       return notifier_from_errno(-EADDRINUSE);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
 static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
        if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
@@ -871,10 +898,37 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
        return NOTIFY_OK;
 }
 
+static int ipvlan_addr4_validator_event(struct notifier_block *unused,
+                                       unsigned long event, void *ptr)
+{
+       struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+       struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
+       struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+       if (!netif_is_ipvlan(dev))
+               return NOTIFY_DONE;
+
+       if (!ipvlan || !ipvlan->port)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false))
+                       return notifier_from_errno(-EADDRINUSE);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
 static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_event,
 };
 
+static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
+       .notifier_call = ipvlan_addr4_validator_event,
+};
+
 static struct notifier_block ipvlan_notifier_block __read_mostly = {
        .notifier_call = ipvlan_device_event,
 };
@@ -883,6 +937,10 @@ static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_event,
 };
 
+static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
+       .notifier_call = ipvlan_addr6_validator_event,
+};
+
 static void ipvlan_ns_exit(struct net *net)
 {
        struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
@@ -907,7 +965,10 @@ static int __init ipvlan_init_module(void)
        ipvlan_init_secret();
        register_netdevice_notifier(&ipvlan_notifier_block);
        register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+       register_inet6addr_validator_notifier(
+           &ipvlan_addr6_vtor_notifier_block);
        register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+       register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
 
        err = register_pernet_subsys(&ipvlan_net_ops);
        if (err < 0)
@@ -922,7 +983,11 @@ static int __init ipvlan_init_module(void)
        return 0;
 error:
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+       unregister_inetaddr_validator_notifier(
+           &ipvlan_addr4_vtor_notifier_block);
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+       unregister_inet6addr_validator_notifier(
+           &ipvlan_addr6_vtor_notifier_block);
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        return err;
 }
@@ -933,7 +998,11 @@ static void __exit ipvlan_cleanup_module(void)
        unregister_pernet_subsys(&ipvlan_net_ops);
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+       unregister_inetaddr_validator_notifier(
+           &ipvlan_addr4_vtor_notifier_block);
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+       unregister_inet6addr_validator_notifier(
+           &ipvlan_addr6_vtor_notifier_block);
 }
 
 module_init(ipvlan_init_module);
index 92578d72e4ee51ce26d3e23d4b86099a0097801d..63a8ff816e5917983d7c095696492175d7160a77 100644 (file)
@@ -886,7 +886,7 @@ static int marvell_read_link(struct mii_phy *phy)
         SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
 
 /* Broadcom BCM 5201 */
-static struct mii_phy_ops bcm5201_phy_ops = {
+static const struct mii_phy_ops bcm5201_phy_ops = {
        .init           = bcm5201_init,
        .suspend        = bcm5201_suspend,
        .setup_aneg     = genmii_setup_aneg,
@@ -905,7 +905,7 @@ static struct mii_phy_def bcm5201_phy_def = {
 };
 
 /* Broadcom BCM 5221 */
-static struct mii_phy_ops bcm5221_phy_ops = {
+static const struct mii_phy_ops bcm5221_phy_ops = {
        .suspend        = bcm5221_suspend,
        .init           = bcm5221_init,
        .setup_aneg     = genmii_setup_aneg,
@@ -924,7 +924,7 @@ static struct mii_phy_def bcm5221_phy_def = {
 };
 
 /* Broadcom BCM 5241 */
-static struct mii_phy_ops bcm5241_phy_ops = {
+static const struct mii_phy_ops bcm5241_phy_ops = {
        .suspend        = bcm5241_suspend,
        .init           = bcm5241_init,
        .setup_aneg     = genmii_setup_aneg,
@@ -942,7 +942,7 @@ static struct mii_phy_def bcm5241_phy_def = {
 };
 
 /* Broadcom BCM 5400 */
-static struct mii_phy_ops bcm5400_phy_ops = {
+static const struct mii_phy_ops bcm5400_phy_ops = {
        .init           = bcm5400_init,
        .suspend        = bcm5400_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -961,7 +961,7 @@ static struct mii_phy_def bcm5400_phy_def = {
 };
 
 /* Broadcom BCM 5401 */
-static struct mii_phy_ops bcm5401_phy_ops = {
+static const struct mii_phy_ops bcm5401_phy_ops = {
        .init           = bcm5401_init,
        .suspend        = bcm5401_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -980,7 +980,7 @@ static struct mii_phy_def bcm5401_phy_def = {
 };
 
 /* Broadcom BCM 5411 */
-static struct mii_phy_ops bcm5411_phy_ops = {
+static const struct mii_phy_ops bcm5411_phy_ops = {
        .init           = bcm5411_init,
        .suspend        = generic_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -999,7 +999,7 @@ static struct mii_phy_def bcm5411_phy_def = {
 };
 
 /* Broadcom BCM 5421 */
-static struct mii_phy_ops bcm5421_phy_ops = {
+static const struct mii_phy_ops bcm5421_phy_ops = {
        .init           = bcm5421_init,
        .suspend        = generic_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -1019,7 +1019,7 @@ static struct mii_phy_def bcm5421_phy_def = {
 };
 
 /* Broadcom BCM 5421 built-in K2 */
-static struct mii_phy_ops bcm5421k2_phy_ops = {
+static const struct mii_phy_ops bcm5421k2_phy_ops = {
        .init           = bcm5421_init,
        .suspend        = generic_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -1037,7 +1037,7 @@ static struct mii_phy_def bcm5421k2_phy_def = {
        .ops            = &bcm5421k2_phy_ops
 };
 
-static struct mii_phy_ops bcm5461_phy_ops = {
+static const struct mii_phy_ops bcm5461_phy_ops = {
        .init           = bcm5421_init,
        .suspend        = generic_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -1057,7 +1057,7 @@ static struct mii_phy_def bcm5461_phy_def = {
 };
 
 /* Broadcom BCM 5462 built-in Vesta */
-static struct mii_phy_ops bcm5462V_phy_ops = {
+static const struct mii_phy_ops bcm5462V_phy_ops = {
        .init           = bcm5421_init,
        .suspend        = generic_suspend,
        .setup_aneg     = bcm54xx_setup_aneg,
@@ -1076,7 +1076,7 @@ static struct mii_phy_def bcm5462V_phy_def = {
 };
 
 /* Marvell 88E1101 amd 88E1111 */
-static struct mii_phy_ops marvell88e1101_phy_ops = {
+static const struct mii_phy_ops marvell88e1101_phy_ops = {
        .suspend        = generic_suspend,
        .setup_aneg     = marvell_setup_aneg,
        .setup_forced   = marvell_setup_forced,
@@ -1084,7 +1084,7 @@ static struct mii_phy_ops marvell88e1101_phy_ops = {
        .read_link      = marvell_read_link
 };
 
-static struct mii_phy_ops marvell88e1111_phy_ops = {
+static const struct mii_phy_ops marvell88e1111_phy_ops = {
        .init           = marvell88e1111_init,
        .suspend        = generic_suspend,
        .setup_aneg     = marvell_setup_aneg,
@@ -1122,7 +1122,7 @@ static struct mii_phy_def marvell88e1111_phy_def = {
 };
 
 /* Generic implementation for most 10/100 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
        .setup_aneg     = genmii_setup_aneg,
        .setup_forced   = genmii_setup_forced,
        .poll_link      = genmii_poll_link,
index 6c5d5ef46f75aa9a9089ac80bbee30a7f579b016..a3ec1892a2862d9df9ab7d906f6c47931944fecc 100644 (file)
@@ -2005,12 +2005,6 @@ static const struct net_device_ops team_netdev_ops = {
        .ndo_del_slave          = team_del_slave,
        .ndo_fix_features       = team_fix_features,
        .ndo_change_carrier     = team_change_carrier,
-       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
-       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
-       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
-       .ndo_fdb_add            = switchdev_port_fdb_add,
-       .ndo_fdb_del            = switchdev_port_fdb_del,
-       .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_features_check     = passthru_features_check,
 };
 
index fd31fab2a9daebb842784220a3a1950221e1e759..5a02053181d10d726869d013b35109c823a2b5e5 100644 (file)
 
 /* OCP_PHY_STATUS */
 #define PHY_STAT_MASK          0x0007
+#define PHY_STAT_EXT_INIT      2
 #define PHY_STAT_LAN_ON                3
 #define PHY_STAT_PWRDN         5
 
@@ -1812,6 +1813,10 @@ static int rx_bottom(struct r8152 *tp, int budget)
                        unsigned int pkt_len;
                        struct sk_buff *skb;
 
+                       /* limit the number of skbs queued on rx_queue */
+                       if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
+                               break;
+
                        pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
                        if (pkt_len < ETH_ZLEN)
                                break;
@@ -1933,7 +1938,8 @@ static int r8152_poll(struct napi_struct *napi, int budget)
        bottom_half(tp);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               if (!napi_complete_done(napi, work_done))
+                       goto out;
                if (!list_empty(&tp->rx_done))
                        napi_schedule(napi);
                else if (!skb_queue_empty(&tp->tx_queue) &&
@@ -1941,6 +1947,7 @@ static int r8152_poll(struct napi_struct *napi, int budget)
                        napi_schedule(napi);
        }
 
+out:
        return work_done;
 }
 
@@ -2262,7 +2269,6 @@ static int rtl8153_enable(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return -ENODEV;
 
-       usb_disable_lpm(tp->udev);
        set_tx_qlen(tp);
        rtl_set_eee_plus(tp);
        r8153_set_rx_early_timeout(tp);
@@ -2428,6 +2434,29 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
                device_set_wakeup_enable(&tp->udev->dev, false);
 }
 
+static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
+{
+       /* MAC clock speed down */
+       if (enable) {
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
+                              ALDPS_SPDWN_RATIO);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
+                              EEE_SPDWN_RATIO);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+                              PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+                              U1U2_SPDWN_EN | L1_SPDWN_EN);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+                              PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+                              TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
+                              TP1000_SPDWN_EN);
+       } else {
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+       }
+}
+
 static void r8153_u1u2en(struct r8152 *tp, bool enable)
 {
        u8 u1u2[8];
@@ -2445,13 +2474,35 @@ static void r8153_u2p3en(struct r8152 *tp, bool enable)
        u32 ocp_data;
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
-       if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+       if (enable)
                ocp_data |= U2P3_ENABLE;
        else
                ocp_data &= ~U2P3_ENABLE;
        ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
 }
 
+static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
+{
+       u16 data;
+       int i;
+
+       for (i = 0; i < 500; i++) {
+               data = ocp_reg_read(tp, OCP_PHY_STATUS);
+               data &= PHY_STAT_MASK;
+               if (desired) {
+                       if (data == desired)
+                               break;
+               } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN ||
+                          data == PHY_STAT_EXT_INIT) {
+                       break;
+               }
+
+               msleep(20);
+       }
+
+       return data;
+}
+
 static void r8153_power_cut_en(struct r8152 *tp, bool enable)
 {
        u32 ocp_data;
@@ -2506,13 +2557,26 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 
 static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
 {
-       rtl_runtime_suspend_enable(tp, enable);
-
        if (enable) {
                r8153_u1u2en(tp, false);
                r8153_u2p3en(tp, false);
+               r8153_mac_clk_spd(tp, true);
+               rtl_runtime_suspend_enable(tp, true);
        } else {
-               r8153_u2p3en(tp, true);
+               rtl_runtime_suspend_enable(tp, false);
+               r8153_mac_clk_spd(tp, false);
+
+               switch (tp->version) {
+               case RTL_VER_03:
+               case RTL_VER_04:
+                       break;
+               case RTL_VER_05:
+               case RTL_VER_06:
+               default:
+                       r8153_u2p3en(tp, true);
+                       break;
+               }
+
                r8153_u1u2en(tp, true);
        }
 }
@@ -2778,9 +2842,15 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
                data |= EN_ALDPS;
                ocp_reg_write(tp, OCP_POWER_CFG, data);
        } else {
+               int i;
+
                data &= ~EN_ALDPS;
                ocp_reg_write(tp, OCP_POWER_CFG, data);
-               msleep(20);
+               for (i = 0; i < 20; i++) {
+                       usleep_range(1000, 2000);
+                       if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
+                               break;
+               }
        }
 }
 
@@ -2851,6 +2921,17 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        r8153_aldps_en(tp, true);
        r8152b_enable_fc(tp);
 
+       switch (tp->version) {
+       case RTL_VER_03:
+       case RTL_VER_04:
+               break;
+       case RTL_VER_05:
+       case RTL_VER_06:
+       default:
+               r8153_u2p3en(tp, true);
+               break;
+       }
+
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2859,6 +2940,7 @@ static void r8153_first_init(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
+       r8153_mac_clk_spd(tp, false);
        rxdy_gated_en(tp, true);
        r8153_teredo_off(tp);
 
@@ -2913,11 +2995,6 @@ static void r8153_first_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
        /* TX share fifo free credit full threshold */
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
-
-       /* rx aggregation */
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
-       ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
 static void r8153_enter_oob(struct r8152 *tp)
@@ -2925,6 +3002,8 @@ static void r8153_enter_oob(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
+       r8153_mac_clk_spd(tp, true);
+
        ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
@@ -2980,7 +3059,6 @@ static void rtl8153_disable(struct r8152 *tp)
        rtl_disable(tp);
        rtl_reset_bmu(tp);
        r8153_aldps_en(tp, true);
-       usb_enable_lpm(tp->udev);
 }
 
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -3099,12 +3177,23 @@ static void rtl8153_up(struct r8152 *tp)
                return;
 
        r8153_u1u2en(tp, false);
+       r8153_u2p3en(tp, false);
        r8153_aldps_en(tp, false);
        r8153_first_init(tp);
        r8153_aldps_en(tp, true);
-       r8153_u2p3en(tp, true);
+
+       switch (tp->version) {
+       case RTL_VER_03:
+       case RTL_VER_04:
+               break;
+       case RTL_VER_05:
+       case RTL_VER_06:
+       default:
+               r8153_u2p3en(tp, true);
+               break;
+       }
+
        r8153_u1u2en(tp, true);
-       usb_enable_lpm(tp->udev);
 }
 
 static void rtl8153_down(struct r8152 *tp)
@@ -3420,12 +3509,7 @@ static void r8153_init(struct r8152 *tp)
                msleep(20);
        }
 
-       for (i = 0; i < 500; i++) {
-               ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
-               if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
-                       break;
-               msleep(20);
-       }
+       data = r8153_phy_status(tp, 0);
 
        if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
            tp->version == RTL_VER_05)
@@ -3437,14 +3521,8 @@ static void r8153_init(struct r8152 *tp)
                r8152_mdio_write(tp, MII_BMCR, data);
        }
 
-       for (i = 0; i < 500; i++) {
-               ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
-               if (ocp_data == PHY_STAT_LAN_ON)
-                       break;
-               msleep(20);
-       }
+       data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
 
-       usb_disable_lpm(tp->udev);
        r8153_u2p3en(tp, false);
 
        if (tp->version == RTL_VER_04) {
@@ -3504,15 +3582,28 @@ static void r8153_init(struct r8152 *tp)
 
        r8153_power_cut_en(tp, false);
        r8153_u1u2en(tp, true);
+       r8153_mac_clk_spd(tp, false);
+       usb_enable_lpm(tp->udev);
 
-       /* MAC clock speed down */
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+       /* rx aggregation */
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+       ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 
        rtl_tally_reset(tp);
-       r8153_u2p3en(tp, true);
+
+       switch (tp->udev->speed) {
+       case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
+               tp->coalesce = COALESCE_SUPER;
+               break;
+       case USB_SPEED_HIGH:
+               tp->coalesce = COALESCE_HIGH;
+               break;
+       default:
+               tp->coalesce = COALESCE_SLOW;
+               break;
+       }
 }
 
 static int rtl8152_pre_reset(struct usb_interface *intf)
@@ -3697,11 +3788,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        mutex_lock(&tp->control);
 
-       if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-               tp->rtl_ops.init(tp);
-               queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
+       if (!test_bit(SELECTIVE_SUSPEND, &tp->flags))
                netif_device_attach(netdev);
-       }
 
        if (netif_running(netdev) && netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
@@ -3747,6 +3835,10 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+       mutex_lock(&tp->control);
+       tp->rtl_ops.init(tp);
+       queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
+       mutex_unlock(&tp->control);
        return rtl8152_resume(intf);
 }
 
@@ -4458,19 +4550,6 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->mii.reg_num_mask = 0x1f;
        tp->mii.phy_id = R8152_PHY_ID;
 
-       switch (udev->speed) {
-       case USB_SPEED_SUPER:
-       case USB_SPEED_SUPER_PLUS:
-               tp->coalesce = COALESCE_SUPER;
-               break;
-       case USB_SPEED_HIGH:
-               tp->coalesce = COALESCE_HIGH;
-               break;
-       default:
-               tp->coalesce = COALESCE_SLOW;
-               break;
-       }
-
        tp->autoneg = AUTONEG_ENABLE;
        tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
        tp->duplex = DUPLEX_FULL;
index d5990eb160bdf49a3a916c0195d04fbe521ef2b2..02781e78ffb69683d529a2930fa2f6b3d552e64a 100644 (file)
@@ -341,6 +341,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        struct msghdr msg;
        struct kvec iov[1];
        size_t offset;
+       s64 tx_total_len;
        u32 abort_code;
        int ret;
 
@@ -364,9 +365,20 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        srx.transport.sin.sin_port = call->port;
        memcpy(&srx.transport.sin.sin_addr, addr, 4);
 
+       /* Work out the length we're going to transmit.  This is awkward for
+        * calls such as FS.StoreData where there's an extra injection of data
+        * after the initial fixed part.
+        */
+       tx_total_len = call->request_size;
+       if (call->send_pages) {
+               tx_total_len += call->last_to - call->first_offset;
+               tx_total_len += (call->last - call->first) * PAGE_SIZE;
+       }
+
        /* create a call */
        rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
-                                        (unsigned long) call, gfp,
+                                        (unsigned long)call,
+                                        tx_total_len, gfp,
                                         (async ?
                                          afs_wake_up_async_call :
                                          afs_wake_up_call_waiter));
@@ -738,6 +750,8 @@ void afs_send_empty_reply(struct afs_call *call)
 
        _enter("");
 
+       rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, 0);
+
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
        iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
@@ -772,6 +786,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 
        _enter("");
 
+       rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, len);
+
        iov[0].iov_base         = (void *) buf;
        iov[0].iov_len          = len;
        msg.msg_name            = NULL;
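
The afs_make_call() hunk above pre-computes how much data the call will transmit so it can be passed to rxrpc_kernel_begin_call(): the fixed request_size, plus, for page-streaming calls such as FS.StoreData, the partial first and last pages and the whole pages in between. A worked userspace check of that arithmetic with invented values, assuming 4 KiB pages.

#include <stdio.h>

#define PAGE_SIZE	4096LL

int main(void)
{
	/* all of these numbers are made up for illustration */
	long long request_size = 232;		/* fixed marshalled arguments */
	long long first = 10, last = 12;	/* page indices being streamed */
	long long first_offset = 512;		/* data starts here in page 'first' */
	long long last_to = 1024;		/* data ends here in page 'last' */

	long long tx_total_len = request_size
		+ (last_to - first_offset)
		+ (last - first) * PAGE_SIZE;

	/* 232 + 512 + 2 * 4096 = 8936 */
	printf("tx_total_len = %lld\n", tx_total_len);
	return 0;
}

The reply-side hunks make the matching promise explicit with rxrpc_kernel_set_tx_length(): zero bytes for an empty reply, len bytes for a simple one.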
index a2e9d6ea1349fb85418a9ebafc55dd08d16ca6b0..e7c04c4e4bcd3e538b365b138a848df9ed96ede6 100644 (file)
@@ -150,8 +150,15 @@ struct in_ifaddr {
        unsigned long           ifa_tstamp; /* updated timestamp */
 };
 
+struct in_validator_info {
+       __be32                  ivi_addr;
+       struct in_device        *ivi_dev;
+};
+
 int register_inetaddr_notifier(struct notifier_block *nb);
 int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_validator_notifier(struct notifier_block *nb);
+int unregister_inetaddr_validator_notifier(struct notifier_block *nb);
 
 void inet_netconf_notify_devconf(struct net *net, int event, int type,
                                 int ifindex, struct ipv4_devconf *devconf);
index c50c9218e31e06a98ef606967c973cdd10511460..524c7776ce96b2c661364b3297b807664338841e 100644 (file)
@@ -972,7 +972,7 @@ struct xfrmdev_ops {
  *      with PF and querying it may introduce a theoretical security risk.
  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index,
  *                    __be16 protocol, struct tc_to_netdev *tc);
  *     Called to setup any 'tc' scheduler, classifier or action on @dev.
  *     This is always called from the stack with the rtnl lock held and netif
@@ -1222,7 +1222,7 @@ struct net_device_ops {
                                                   struct net_device *dev,
                                                   int vf, bool setting);
        int                     (*ndo_setup_tc)(struct net_device *dev,
-                                               u32 handle,
+                                               u32 handle, u32 chain_index,
                                                __be16 protocol,
                                                struct tc_to_netdev *tc);
 #if IS_ENABLED(CONFIG_FCOE)
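
The ndo_setup_tc() prototype change above threads a chain_index argument through so the stack can tell a driver which tc filter chain an offload request belongs to. A hedged sketch of how a driver that only offloads the default chain might treat the new argument; the function is illustrative and not taken from any driver in this series.

/* Illustrative only: offload chain 0, refuse everything else. */
static int foo_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
			__be16 proto, struct tc_to_netdev *tc)
{
	if (chain_index)
		return -EOPNOTSUPP;	/* hardware only models the default chain */

	/* ...dispatch on tc->type as the drivers above already do... */
	return 0;
}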
index 4fb4666ea879c6e8f8b9da42cce9f8217e1681d9..5958b45eb6996cedfee6466b201a0fd3c4a70046 100644 (file)
 #include <linux/slab.h>
 #include <linux/qed/qed_if.h>
 
+enum qed_ll2_conn_type {
+       QED_LL2_TYPE_FCOE,
+       QED_LL2_TYPE_ISCSI,
+       QED_LL2_TYPE_TEST,
+       QED_LL2_TYPE_ISCSI_OOO,
+       QED_LL2_TYPE_RESERVED2,
+       QED_LL2_TYPE_ROCE,
+       QED_LL2_TYPE_RESERVED3,
+       MAX_QED_LL2_RX_CONN_TYPE
+};
+
+enum qed_ll2_roce_flavor_type {
+       QED_LL2_ROCE,
+       QED_LL2_RROCE,
+       MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_tx_dest {
+       QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+       QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+       QED_LL2_TX_DEST_MAX
+};
+
+enum qed_ll2_error_handle {
+       QED_LL2_DROP_PACKET,
+       QED_LL2_DO_NOTHING,
+       QED_LL2_ASSERT,
+};
+
 struct qed_ll2_stats {
        u64 gsi_invalid_hdr;
        u64 gsi_invalid_pkt_length;
@@ -67,6 +96,105 @@ struct qed_ll2_stats {
        u64 sent_bcast_pkts;
 };
 
+struct qed_ll2_comp_rx_data {
+       void *cookie;
+       dma_addr_t rx_buf_addr;
+       u16 parse_flags;
+       u16 vlan;
+       bool b_last_packet;
+       u8 connection_handle;
+
+       union {
+               u16 packet_length;
+               u16 data_length;
+       } length;
+
+       u32 opaque_data_0;
+       u32 opaque_data_1;
+
+       /* GSI only */
+       u32 gid_dst[4];
+       u16 qp_id;
+
+       union {
+               u8 placement_offset;
+               u8 data_length_error;
+       } u;
+};
+
+typedef
+void (*qed_ll2_complete_rx_packet_cb)(void *cxt,
+                                     struct qed_ll2_comp_rx_data *data);
+
+typedef
+void (*qed_ll2_release_rx_packet_cb)(void *cxt,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t rx_buf_addr,
+                                    bool b_last_packet);
+
+typedef
+void (*qed_ll2_complete_tx_packet_cb)(void *cxt,
+                                     u8 connection_handle,
+                                     void *cookie,
+                                     dma_addr_t first_frag_addr,
+                                     bool b_last_fragment,
+                                     bool b_last_packet);
+
+typedef
+void (*qed_ll2_release_tx_packet_cb)(void *cxt,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t first_frag_addr,
+                                    bool b_last_fragment, bool b_last_packet);
+
+struct qed_ll2_cbs {
+       qed_ll2_complete_rx_packet_cb rx_comp_cb;
+       qed_ll2_release_rx_packet_cb rx_release_cb;
+       qed_ll2_complete_tx_packet_cb tx_comp_cb;
+       qed_ll2_release_tx_packet_cb tx_release_cb;
+       void *cookie;
+};
+
+struct qed_ll2_acquire_data_inputs {
+       enum qed_ll2_conn_type conn_type;
+       u16 mtu;
+       u16 rx_num_desc;
+       u16 rx_num_ooo_buffers;
+       u8 rx_drop_ttl0_flg;
+       u8 rx_vlan_removal_en;
+       u16 tx_num_desc;
+       u8 tx_max_bds_per_packet;
+       u8 tx_tc;
+       enum qed_ll2_tx_dest tx_dest;
+       enum qed_ll2_error_handle ai_err_packet_too_big;
+       enum qed_ll2_error_handle ai_err_no_buf;
+       u8 gsi_enable;
+};
+
+struct qed_ll2_acquire_data {
+       struct qed_ll2_acquire_data_inputs input;
+       const struct qed_ll2_cbs *cbs;
+
+       /* Output container for LL2 connection's handle */
+       u8 *p_connection_handle;
+};
+
+struct qed_ll2_tx_pkt_info {
+       void *cookie;
+       dma_addr_t first_frag;
+       enum qed_ll2_tx_dest tx_dest;
+       enum qed_ll2_roce_flavor_type qed_roce_flavor;
+       u16 vlan;
+       u16 l4_hdr_offset_w;    /* from start of packet */
+       u16 first_frag_len;
+       u8 num_of_bds;
+       u8 bd_flags;
+       bool enable_ip_cksum;
+       bool enable_l4_cksum;
+       bool calc_ip_len;
+};
+
 #define QED_LL2_UNUSED_HANDLE   (0xff)
 
 struct qed_ll2_cb_ops {
index cbb2ff0ce4bc34c5179a5ad77167ac0b721f6a30..8e70f5ee05afd1da436fbd37bbf4641a11883537 100644 (file)
@@ -34,8 +34,6 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 #include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_ll2_if.h>
@@ -491,42 +489,6 @@ struct qed_roce_ll2_packet {
        enum qed_roce_ll2_tx_dest tx_dest;
 };
 
-struct qed_roce_ll2_tx_params {
-       int reserved;
-};
-
-struct qed_roce_ll2_rx_params {
-       u16 vlan_id;
-       u8 smac[ETH_ALEN];
-       int rc;
-};
-
-struct qed_roce_ll2_cbs {
-       void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
-
-       void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
-                     struct qed_roce_ll2_rx_params *params);
-};
-
-struct qed_roce_ll2_params {
-       u16 max_rx_buffers;
-       u16 max_tx_buffers;
-       u16 mtu;
-       u8 mac_address[ETH_ALEN];
-       struct qed_roce_ll2_cbs cbs;
-       void *cb_cookie;
-};
-
-struct qed_roce_ll2_info {
-       u8 handle;
-       struct qed_roce_ll2_cbs cbs;
-       u8 mac_address[ETH_ALEN];
-       void *cb_cookie;
-
-       /* Lock to protect ll2 */
-       struct mutex lock;
-};
-
 enum qed_rdma_type {
        QED_RDMA_TYPE_ROCE,
 };
@@ -579,26 +541,40 @@ struct qed_rdma_ops {
        int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
                             struct qed_rdma_query_qp_out_params *oparams);
        int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+
        int
        (*rdma_register_tid)(void *rdma_cxt,
                             struct qed_rdma_register_tid_in_params *iparams);
+
        int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
        int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
        void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
-       int (*roce_ll2_start)(struct qed_dev *cdev,
-                             struct qed_roce_ll2_params *params);
-       int (*roce_ll2_stop)(struct qed_dev *cdev);
-       int (*roce_ll2_tx)(struct qed_dev *cdev,
-                          struct qed_roce_ll2_packet *packet,
-                          struct qed_roce_ll2_tx_params *params);
-       int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
-                                      struct qed_roce_ll2_buffer *buf,
-                                      u64 cookie, u8 notify_fw);
-       int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
-                                      u8 *old_mac_address,
-                                      u8 *new_mac_address);
-       int (*roce_ll2_stats)(struct qed_dev *cdev,
-                             struct qed_ll2_stats *stats);
+
+       int (*ll2_acquire_connection)(void *rdma_cxt,
+                                     struct qed_ll2_acquire_data *data);
+
+       int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
+       int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
+       void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);
+
+       int (*ll2_prepare_tx_packet)(void *rdma_cxt,
+                                    u8 connection_handle,
+                                    struct qed_ll2_tx_pkt_info *pkt,
+                                    bool notify_fw);
+
+       int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
+                                            u8 connection_handle,
+                                            dma_addr_t addr,
+                                            u16 nbytes);
+       int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
+                                 dma_addr_t addr, u16 buf_len, void *cookie,
+                                 u8 notify_fw);
+       int (*ll2_get_stats)(void *rdma_cxt,
+                            u8 connection_handle,
+                            struct qed_ll2_stats *p_stats);
+       int (*ll2_set_mac_filter)(struct qed_dev *cdev,
+                                 u8 *old_mac_address, u8 *new_mac_address);
+
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops(void);
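
Taken together, the qed_ll2_acquire_data / qed_ll2_cbs structures and the new ll2_* ops replace the old roce_ll2_* entry points: the caller now supplies its completion and release callbacks plus an output pointer for the connection handle in a single descriptor. A hedged sketch of how an upper-layer driver might wire this up; the foo_* names and parameter values are placeholders, and it assumes the core no longer needs the cbs table after ll2_acquire_connection() returns.

/* Placeholder callbacks; a real consumer would recycle or free buffers here. */
static void foo_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data) { }
static void foo_rx_release(void *cxt, u8 handle, void *cookie,
			   dma_addr_t addr, bool last_pkt) { }
static void foo_tx_comp(void *cxt, u8 handle, void *cookie,
			dma_addr_t first_frag, bool last_frag, bool last_pkt) { }
static void foo_tx_release(void *cxt, u8 handle, void *cookie,
			   dma_addr_t first_frag, bool last_frag, bool last_pkt) { }

static int foo_ll2_start(const struct qed_rdma_ops *ops, void *rdma_cxt,
			 void *priv, u8 *handle)
{
	struct qed_ll2_cbs cbs = {
		.rx_comp_cb	= foo_rx_comp,
		.rx_release_cb	= foo_rx_release,
		.tx_comp_cb	= foo_tx_comp,
		.tx_release_cb	= foo_tx_release,
		.cookie		= priv,
	};
	struct qed_ll2_acquire_data data = {
		.input = {
			.conn_type	= QED_LL2_TYPE_ROCE,
			.mtu		= 1500,
			.rx_num_desc	= 128,
			.tx_num_desc	= 128,
			.tx_dest	= QED_LL2_TX_DEST_NW,
		},
		.cbs			= &cbs,
		.p_connection_handle	= handle,
	};

	return ops->ll2_acquire_connection(rdma_cxt, &data);
}

A successful acquire is presumably followed by ll2_establish_connection() on the returned handle before any buffers are posted.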
index 707910c6c6c5ec0258e2d8377125b99a4fe7bb59..7343f71783dce943d5d884e158efecf1f11f9d87 100644 (file)
@@ -38,6 +38,7 @@ struct sockaddr_rxrpc {
 #define RXRPC_EXCLUSIVE_CONNECTION     3       /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
 #define RXRPC_MIN_SECURITY_LEVEL       4       /* minimum security level */
 #define RXRPC_UPGRADEABLE_SERVICE      5       /* Upgrade service[0] -> service[1] */
+#define RXRPC_SUPPORTED_CMSG           6       /* Get highest supported control message type */
 
 /*
  * RxRPC control messages
@@ -45,16 +46,20 @@ struct sockaddr_rxrpc {
  * - terminal messages mean that a user call ID tag can be recycled
  * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
  */
-#define RXRPC_USER_CALL_ID     1       /* sr: user call ID specifier */
-#define RXRPC_ABORT            2       /* sr: abort request / notification [terminal] */
-#define RXRPC_ACK              3       /* -r: [Service] RPC op final ACK received [terminal] */
-#define RXRPC_NET_ERROR                5       /* -r: network error received [terminal] */
-#define RXRPC_BUSY             6       /* -r: server busy received [terminal] */
-#define RXRPC_LOCAL_ERROR      7       /* -r: local error generated [terminal] */
-#define RXRPC_NEW_CALL         8       /* -r: [Service] new incoming call notification */
-#define RXRPC_ACCEPT           9       /* s-: [Service] accept request */
-#define RXRPC_EXCLUSIVE_CALL   10      /* s-: Call should be on exclusive connection */
-#define RXRPC_UPGRADE_SERVICE  11      /* s-: Request service upgrade for client call */
+enum rxrpc_cmsg_type {
+       RXRPC_USER_CALL_ID      = 1,    /* sr: user call ID specifier */
+       RXRPC_ABORT             = 2,    /* sr: abort request / notification [terminal] */
+       RXRPC_ACK               = 3,    /* -r: [Service] RPC op final ACK received [terminal] */
+       RXRPC_NET_ERROR         = 5,    /* -r: network error received [terminal] */
+       RXRPC_BUSY              = 6,    /* -r: server busy received [terminal] */
+       RXRPC_LOCAL_ERROR       = 7,    /* -r: local error generated [terminal] */
+       RXRPC_NEW_CALL          = 8,    /* -r: [Service] new incoming call notification */
+       RXRPC_ACCEPT            = 9,    /* s-: [Service] accept request */
+       RXRPC_EXCLUSIVE_CALL    = 10,   /* s-: Call should be on exclusive connection */
+       RXRPC_UPGRADE_SERVICE   = 11,   /* s-: Request service upgrade for client call */
+       RXRPC_TX_LENGTH         = 12,   /* s-: Total length of Tx data */
+       RXRPC__SUPPORTED
+};
 
 /*
  * RxRPC security levels
index b43a4eec3ceca4f798bf7513ecd4aa999535c6e7..d0889cb501726685e56a2d6cf333ab55c2d0d48a 100644 (file)
@@ -48,11 +48,15 @@ struct prefix_info {
        struct in6_addr         prefix;
 };
 
-
 #include <linux/netdevice.h>
 #include <net/if_inet6.h>
 #include <net/ipv6.h>
 
+struct in6_validator_info {
+       struct in6_addr         i6vi_addr;
+       struct inet6_dev        *i6vi_dev;
+};
+
 #define IN6_ADDR_HSIZE_SHIFT   4
 #define IN6_ADDR_HSIZE         (1 << IN6_ADDR_HSIZE_SHIFT)
 
@@ -278,6 +282,10 @@ int register_inet6addr_notifier(struct notifier_block *nb);
 int unregister_inet6addr_notifier(struct notifier_block *nb);
 int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
+int register_inet6addr_validator_notifier(struct notifier_block *nb);
+int unregister_inet6addr_validator_notifier(struct notifier_block *nb);
+int inet6addr_validator_notifier_call_chain(unsigned long val, void *v);
+
 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
                                  int ifindex, struct ipv6_devconf *devconf);
 
index b5f5187f488cc1e663912ec1e12811d49ecb3fd5..c172709787af316efaee4cfa94cc185ca989b5fb 100644 (file)
@@ -33,6 +33,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
                                           struct sockaddr_rxrpc *,
                                           struct key *,
                                           unsigned long,
+                                          s64,
                                           gfp_t,
                                           rxrpc_notify_rx_t);
 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
@@ -46,5 +47,6 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
                           struct sockaddr_rxrpc *);
 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t);
+void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
 
 #endif /* _NET_RXRPC_H */
index cd686c4fb32dc5409a08f818d48228bffa6f6778..9a14a0850b0e3601194479b4e1a433dc817e088e 100644 (file)
@@ -122,6 +122,9 @@ struct netns_ipv4 {
        int sysctl_tcp_fin_timeout;
        unsigned int sysctl_tcp_notsent_lowat;
        int sysctl_tcp_tw_reuse;
+       int sysctl_tcp_sack;
+       int sysctl_tcp_window_scaling;
+       int sysctl_tcp_timestamps;
        struct inet_timewait_death_row tcp_death_row;
        int sysctl_max_syn_backlog;
 
index b94006f6fbdde0d78fe33b9c2d86159e291c30cf..031bf16d15218329be98b1fb8c3f3e891a6f86e3 100644 (file)
@@ -8,10 +8,11 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport);
 u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
                   __be16 sport, __be16 dport);
-u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
+u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
 u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
                     __be16 sport, __be16 dport);
-u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
+u32 secure_tcpv6_ts_off(const struct net *net,
+                       const __be32 *saddr, const __be32 *daddr);
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport);
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
index 3467d9e89e7dba1c35fa44a6268a28735f795319..858891c36f94ad2577726d6d21cf871dbcd55d98 100644 (file)
@@ -1080,6 +1080,7 @@ struct proto {
        bool                    (*stream_memory_free)(const struct sock *sk);
        /* Memory pressure */
        void                    (*enter_memory_pressure)(struct sock *sk);
+       void                    (*leave_memory_pressure)(struct sock *sk);
        atomic_long_t           *memory_allocated;      /* Current allocated memory. */
        struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
        /*
@@ -1088,7 +1089,7 @@ struct proto {
         * All the __sk_mem_schedule() is of this nature: accounting
         * is strict, actions are advisory and have some latency.
         */
-       int                     *memory_pressure;
+       unsigned long           *memory_pressure;
        long                    *sysctl_mem;
        int                     *sysctl_wmem;
        int                     *sysctl_rmem;
@@ -1193,25 +1194,6 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
        return !!*sk->sk_prot->memory_pressure;
 }
 
-static inline void sk_leave_memory_pressure(struct sock *sk)
-{
-       int *memory_pressure = sk->sk_prot->memory_pressure;
-
-       if (!memory_pressure)
-               return;
-
-       if (*memory_pressure)
-               *memory_pressure = 0;
-}
-
-static inline void sk_enter_memory_pressure(struct sock *sk)
-{
-       if (!sk->sk_prot->enter_memory_pressure)
-               return;
-
-       sk->sk_prot->enter_memory_pressure(sk);
-}
-
 static inline long
 sk_memory_allocated(const struct sock *sk)
 {
index 929d6af321cde71a509577cb14747ecd5b77ca8c..c784a6ac6ef1b95fa6252e694427b895d42dd76c 100644 (file)
@@ -46,6 +46,7 @@ enum switchdev_attr_id {
        SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        SWITCHDEV_ATTR_ID_PORT_STP_STATE,
        SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+       SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
        SWITCHDEV_ATTR_ID_PORT_MROUTER,
        SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
        SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
@@ -62,6 +63,7 @@ struct switchdev_attr {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
                unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
+               unsigned long brport_flags_support;     /* PORT_BRIDGE_FLAGS_SUPPORT */
                bool mrouter;                           /* PORT_MROUTER */
                clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
@@ -153,8 +155,11 @@ struct switchdev_ops {
 };
 
 enum switchdev_notifier_type {
-       SWITCHDEV_FDB_ADD = 1,
-       SWITCHDEV_FDB_DEL,
+       SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+       SWITCHDEV_FDB_DEL_TO_BRIDGE,
+       SWITCHDEV_FDB_ADD_TO_DEVICE,
+       SWITCHDEV_FDB_DEL_TO_DEVICE,
+       SWITCHDEV_FDB_OFFLOADED,
 };
 
 struct switchdev_notifier_info {
index 28b577a35786ddc9b223b54dd387e59910d9c521..3ab677d11d026e77deb3e7ec4fc2e0cf376af4ad 100644 (file)
@@ -237,9 +237,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 
 /* sysctl variables for tcp */
-extern int sysctl_tcp_timestamps;
-extern int sysctl_tcp_window_scaling;
-extern int sysctl_tcp_sack;
 extern int sysctl_tcp_fastopen;
 extern int sysctl_tcp_retrans_collapse;
 extern int sysctl_tcp_stdurg;
@@ -279,7 +276,7 @@ extern int sysctl_tcp_pacing_ca_ratio;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
-extern int tcp_memory_pressure;
+extern unsigned long tcp_memory_pressure;
 
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
@@ -427,7 +424,7 @@ void tcp_set_keepalive(struct sock *sk, int val);
 void tcp_syn_ack_timeout(const struct request_sock *req);
 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
-void tcp_parse_options(const struct sk_buff *skb,
+void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
@@ -520,7 +517,8 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 u64 cookie_init_timestamp(struct request_sock *req);
-bool cookie_timestamp_decode(struct tcp_options_received *opt);
+bool cookie_timestamp_decode(const struct net *net,
+                            struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
                   const struct net *net, const struct dst_entry *dst);
 
@@ -1322,6 +1320,7 @@ extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  const struct dst_entry *dst);
 
 void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_leave_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
@@ -1870,7 +1869,7 @@ struct tcp_request_sock_ops {
        struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
                                       const struct request_sock *req);
        u32 (*init_seq)(const struct sk_buff *skb);
-       u32 (*init_ts_off)(const struct sk_buff *skb);
+       u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
                           struct tcp_fastopen_cookie *foc,
index 9b2c10b45733e4dc66d601ac2ea52fed65f3752d..f94b48b168dcc82cbd67b916951a4699f4f675ef 100644 (file)
@@ -513,6 +513,11 @@ union bpf_attr {
  *     Get the owner uid of the socket stored inside sk_buff.
  *     @skb: pointer to skb
  *     Return: uid of the socket owner on success or overflowuid if failed.
+ *
+ * u32 bpf_set_hash(skb, hash)
+ *     Set full skb->hash.
+ *     @skb: pointer to skb
+ *     @hash: hash to set
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -562,7 +567,8 @@ union bpf_attr {
        FN(xdp_adjust_head),            \
        FN(probe_read_str),             \
        FN(get_socket_cookie),          \
-       FN(get_socket_uid),
+       FN(get_socket_uid),             \
+       FN(set_hash),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
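
The helper documented above is wired into tc classifier/action programs by the filter.c hunk further down in this diff (bpf_set_hash_proto added to tc_cls_act_func_proto). A small sketch of a tc-attached BPF program using it, declaring the helper by its function number rather than relying on a particular helpers header (the section names and the 0xcafe value are illustrative):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/types.h>

/* Helper stub; the BPF loader resolves it via the function number. */
static __u32 (*bpf_set_hash)(void *ctx, __u32 hash) =
	(void *) BPF_FUNC_set_hash;

__attribute__((section("classifier"), used))
int set_flow_hash(struct __sk_buff *skb)
{
	/* Pin skb->hash to a fixed L4 hash; skb_get_hash() will return it
	 * until something calls skb_clear_hash(). */
	bpf_set_hash(skb, 0xcafe);
	return TC_ACT_OK;
}

char _license[] __attribute__((section("license"), used)) = "GPL";

Compiled with clang -target bpf and attached as a direct-action tc filter, every packet passing the classifier then carries the forced hash.
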
index 1fe4c1e7d66e74c71724932bfe575d4ccc389f39..f904367c0ceef610d5ce9a78fea180bd4753a2f1 100644 (file)
@@ -110,6 +110,48 @@ struct igmpmsg {
        struct in_addr im_src,im_dst;
 };
 
+/* ipmr netlink table attributes */
+enum {
+       IPMRA_TABLE_UNSPEC,
+       IPMRA_TABLE_ID,
+       IPMRA_TABLE_CACHE_RES_QUEUE_LEN,
+       IPMRA_TABLE_MROUTE_REG_VIF_NUM,
+       IPMRA_TABLE_MROUTE_DO_ASSERT,
+       IPMRA_TABLE_MROUTE_DO_PIM,
+       IPMRA_TABLE_VIFS,
+       __IPMRA_TABLE_MAX
+};
+#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
+
+/* ipmr netlink vif attribute format
+ * [ IPMRA_TABLE_VIFS ] - nested attribute
+ *   [ IPMRA_VIF ] - nested attribute
+ *     [ IPMRA_VIFA_xxx ]
+ */
+enum {
+       IPMRA_VIF_UNSPEC,
+       IPMRA_VIF,
+       __IPMRA_VIF_MAX
+};
+#define IPMRA_VIF_MAX (__IPMRA_VIF_MAX - 1)
+
+/* vif-specific attributes */
+enum {
+       IPMRA_VIFA_UNSPEC,
+       IPMRA_VIFA_IFINDEX,
+       IPMRA_VIFA_VIF_ID,
+       IPMRA_VIFA_FLAGS,
+       IPMRA_VIFA_BYTES_IN,
+       IPMRA_VIFA_BYTES_OUT,
+       IPMRA_VIFA_PACKETS_IN,
+       IPMRA_VIFA_PACKETS_OUT,
+       IPMRA_VIFA_LOCAL_ADDR,
+       IPMRA_VIFA_REMOTE_ADDR,
+       IPMRA_VIFA_PAD,
+       __IPMRA_VIFA_MAX
+};
+#define IPMRA_VIFA_MAX (__IPMRA_VIFA_MAX - 1)
+
 /* That's all usermode folks */
 
 #define MFC_ASSERT_THRESH (3*HZ)               /* Maximal freq. of asserts */
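
These attributes are emitted by the new RTM_GETLINK dump handler added to ipmr.c later in this diff; userspace selects it by setting the family of an ordinary link dump to RTNL_FAMILY_IPMR. A rough sketch of the request side (attribute parsing of the replies is omitted):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Returns a netlink socket with an ipmr table dump in flight, or -1. */
int request_ipmr_tables(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family = RTNL_FAMILY_IPMR;	/* select the ipmr dumper */

	/* Each RTM_NEWLINK reply carries IFLA_AF_SPEC holding the
	 * IPMRA_TABLE_* attributes plus a nested IPMRA_TABLE_VIFS list. */
	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
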
index f3d16dbe09d64424d2d92c581f30771f6add6e0b..3199d28980b35442021ed1141151ab957b41c9f9 100644 (file)
@@ -41,6 +41,7 @@ enum {
 #define NTF_MASTER     0x04
 #define NTF_PROXY      0x08    /* == ATF_PUBL */
 #define NTF_EXT_LEARNED        0x10
+#define NTF_OFFLOADED   0x20
 #define NTF_ROUTER     0x80
 
 /*
index 95cffcb21dfdba7c974706131d0f43e21435e82d..d8569329579816213255169d0c183f4400835f7b 100644 (file)
@@ -228,6 +228,7 @@ enum
        LINUX_MIB_TCPABORTONLINGER,             /* TCPAbortOnLinger */
        LINUX_MIB_TCPABORTFAILED,               /* TCPAbortFailed */
        LINUX_MIB_TCPMEMORYPRESSURES,           /* TCPMemoryPressures */
+       LINUX_MIB_TCPMEMORYPRESSURESCHRONO,     /* TCPMemoryPressuresChrono */
        LINUX_MIB_TCPSACKDISCARD,               /* TCPSACKDiscard */
        LINUX_MIB_TCPDSACKIGNOREDOLD,           /* TCPSACKIgnoredOld */
        LINUX_MIB_TCPDSACKIGNOREDNOUNDO,        /* TCPSACKIgnoredNoUndo */
index 14ccb0759fa4daa57217fb334f43216feacd3f7b..519a6144d3d3a90f3a28b2eddc85adba9943d267 100644 (file)
@@ -1346,8 +1346,8 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
                if (reg->type != PTR_TO_PACKET &&
                    reg->type != PTR_TO_PACKET_END)
                        continue;
-               reg->type = UNKNOWN_VALUE;
-               reg->imm = 0;
+               __mark_reg_unknown_value(state->spilled_regs,
+                                        i / BPF_REG_SIZE);
        }
 }
 
@@ -1952,6 +1952,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                         */
                        regs[insn->dst_reg].type = CONST_IMM;
                        regs[insn->dst_reg].imm = insn->imm;
+                       regs[insn->dst_reg].id = 0;
                        regs[insn->dst_reg].max_value = insn->imm;
                        regs[insn->dst_reg].min_value = insn->imm;
                        regs[insn->dst_reg].min_align = calc_align(insn->imm);
@@ -2409,6 +2410,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
                regs[insn->dst_reg].type = CONST_IMM;
                regs[insn->dst_reg].imm = imm;
+               regs[insn->dst_reg].id = 0;
                return 0;
        }
 
@@ -2828,6 +2830,8 @@ static bool states_equal(struct bpf_verifier_env *env,
                        return false;
                if (i % BPF_REG_SIZE)
                        continue;
+               if (old->stack_slot_type[i] != STACK_SPILL)
+                       continue;
                if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
                           &cur->spilled_regs[i / BPF_REG_SIZE],
                           sizeof(old->spilled_regs[0])))
index 08eb072430b9e2732d22bf79394a8c9ad91be8d7..051d7fca0c09bd0aa88e84339a435222b47f7be5 100644 (file)
@@ -266,14 +266,16 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
                        u64 flags, struct perf_raw_record *raw)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
-       struct perf_sample_data sample_data;
        struct bpf_event_entry *ee;
        struct perf_event *event;
 
@@ -294,9 +296,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;
 
-       perf_sample_data_init(&sample_data, 0, 0);
-       sample_data.raw = raw;
-       perf_event_output(event, &sample_data, regs);
+       perf_sample_data_init(sd, 0, 0);
+       sd->raw = raw;
+       perf_event_output(event, sd, regs);
        return 0;
 }
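
The hunk above swaps an on-stack struct perf_sample_data (a fairly large object) for a static per-CPU scratch buffer; because the helper runs with preemption disabled, the per-CPU buffer cannot be handed to two tasks at once. The same pattern in isolation, with made-up names:

#include <linux/percpu.h>
#include <linux/string.h>

struct scratch {
	char buf[512];			/* stand-in for a large on-stack object */
};

static DEFINE_PER_CPU(struct scratch, scratch_area);

static void use_scratch(void)
{
	/* Only safe while this CPU cannot be preempted away from us,
	 * otherwise two tasks could share the same buffer. */
	struct scratch *s = this_cpu_ptr(&scratch_area);

	memset(s->buf, 0, sizeof(s->buf));
}
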
 
index 953b6728bd00c8ca7a4a20f2d2036c6f8f27f8e3..56d4b6977d03f1f900fc5eb08cf2372fd9904f26 100644 (file)
@@ -797,12 +797,6 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
-       .ndo_fdb_add            = switchdev_port_fdb_add,
-       .ndo_fdb_del            = switchdev_port_fdb_del,
-       .ndo_fdb_dump           = switchdev_port_fdb_dump,
-       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
-       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
-       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
        .ndo_get_iflink         = vlan_dev_get_iflink,
 };
index 889e5640455f9636e2587e906da3194118b8f827..1407d1ba7577ffe553969d2cbefad2bd23f66aab 100644 (file)
@@ -121,7 +121,7 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
-/* called with RTNL */
+/* called with RTNL or RCU */
 static int br_switchdev_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
 {
@@ -131,27 +131,36 @@ static int br_switchdev_event(struct notifier_block *unused,
        struct switchdev_notifier_fdb_info *fdb_info;
        int err = NOTIFY_DONE;
 
-       p = br_port_get_rtnl(dev);
+       p = br_port_get_rtnl_rcu(dev);
        if (!p)
                goto out;
 
        br = p->br;
 
        switch (event) {
-       case SWITCHDEV_FDB_ADD:
+       case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = ptr;
                err = br_fdb_external_learn_add(br, p, fdb_info->addr,
                                                fdb_info->vid);
-               if (err)
+               if (err) {
                        err = notifier_from_errno(err);
+                       break;
+               }
+               br_fdb_offloaded_set(br, p, fdb_info->addr,
+                                    fdb_info->vid);
                break;
-       case SWITCHDEV_FDB_DEL:
+       case SWITCHDEV_FDB_DEL_TO_BRIDGE:
                fdb_info = ptr;
                err = br_fdb_external_learn_del(br, p, fdb_info->addr,
                                                fdb_info->vid);
                if (err)
                        err = notifier_from_errno(err);
                break;
+       case SWITCHDEV_FDB_OFFLOADED:
+               fdb_info = ptr;
+               br_fdb_offloaded_set(br, p, fdb_info->addr,
+                                    fdb_info->vid);
+               break;
        }
 
 out:
index ab0c7cc8448f4824d69b9260e79ede7aac14dd9e..fef7872a320b4a0097009d25f2bd91e3f69d41dd 100644 (file)
@@ -511,6 +511,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                fdb->is_static = is_static;
                fdb->added_by_user = 0;
                fdb->added_by_external_learn = 0;
+               fdb->offloaded = 0;
                fdb->updated = fdb->used = jiffies;
                hlist_add_head_rcu(&fdb->hlist, head);
        }
@@ -647,11 +648,16 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
-       ndm->ndm_flags   = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
+       ndm->ndm_flags   = 0;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(br, fdb);
 
+       if (fdb->offloaded)
+               ndm->ndm_flags |= NTF_OFFLOADED;
+       if (fdb->added_by_external_learn)
+               ndm->ndm_flags |= NTF_EXT_LEARNED;
+
        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
                goto nla_put_failure;
        if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
@@ -690,6 +696,8 @@ static void fdb_notify(struct net_bridge *br,
        struct sk_buff *skb;
        int err = -ENOBUFS;
 
+       br_switchdev_fdb_notify(fdb, type);
+
        skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
        if (skb == NULL)
                goto errout;
@@ -1075,7 +1083,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
        struct net_bridge_fdb_entry *fdb;
        int err = 0;
 
-       ASSERT_RTNL();
        spin_lock_bh(&br->hash_lock);
 
        head = &br->hash[br_mac_hash(addr, vid)];
@@ -1110,7 +1117,6 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
        struct net_bridge_fdb_entry *fdb;
        int err = 0;
 
-       ASSERT_RTNL();
        spin_lock_bh(&br->hash_lock);
 
        fdb = br_fdb_find(br, addr, vid);
@@ -1123,3 +1129,17 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 
        return err;
 }
+
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+                         const unsigned char *addr, u16 vid)
+{
+       struct net_bridge_fdb_entry *fdb;
+
+       spin_lock_bh(&br->hash_lock);
+
+       fdb = br_fdb_find(br, addr, vid);
+       if (fdb)
+               fdb->offloaded = 1;
+
+       spin_unlock_bh(&br->hash_lock);
+}
index 3bcda556971e71859bfe8fdf7395edb0f799a61e..63dca347b73bc04af101bf9598bcbb5c42d3cfc6 100644 (file)
@@ -662,16 +662,26 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state)
 }
 
 /* Set/clear port flags based on attribute */
-static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
-                          int attrtype, unsigned long mask)
+static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
+                           int attrtype, unsigned long mask)
 {
-       if (tb[attrtype]) {
-               u8 flag = nla_get_u8(tb[attrtype]);
-               if (flag)
-                       p->flags |= mask;
-               else
-                       p->flags &= ~mask;
-       }
+       unsigned long flags;
+       int err;
+
+       if (!tb[attrtype])
+               return 0;
+
+       if (nla_get_u8(tb[attrtype]))
+               flags = p->flags | mask;
+       else
+               flags = p->flags & ~mask;
+
+       err = br_switchdev_set_port_flag(p, flags, mask);
+       if (err)
+               return err;
+
+       p->flags = flags;
+       return 0;
 }
 
 /* Process bridge protocol info on port */
@@ -681,20 +691,55 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        bool br_vlan_tunnel_old = false;
        int err;
 
-       br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
-       br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
-       br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
-       br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
-       br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
-       br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
-       br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
-       br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
-       br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
-       br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
-       br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
+       if (err)
+               return err;
+
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
+       if (err)
+               return err;
 
        br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
-       br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+       err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+       if (err)
+               return err;
+
        if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
                nbp_vlan_tunnel_info_flush(p);
 
index 20626927f43368a32515ddc331461360a484f1be..c18682f804a0b4b40b76ceeae1998f21098c12c9 100644 (file)
@@ -169,7 +169,8 @@ struct net_bridge_fdb_entry {
        unsigned char                   is_local:1,
                                        is_static:1,
                                        added_by_user:1,
-                                       added_by_external_learn:1;
+                                       added_by_external_learn:1,
+                                       offloaded:1;
 
        /* write-heavy members should not affect lookups */
        unsigned long                   updated ____cacheline_aligned_in_smp;
@@ -284,6 +285,12 @@ static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *
                rtnl_dereference(dev->rx_handler_data) : NULL;
 }
 
+static inline struct net_bridge_port *br_port_get_rtnl_rcu(const struct net_device *dev)
+{
+       return br_port_exists(dev) ?
+               rcu_dereference_rtnl(dev->rx_handler_data) : NULL;
+}
+
 struct net_bridge {
        spinlock_t                      lock;
        spinlock_t                      hash_lock;
@@ -530,6 +537,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid);
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid);
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+                         const unsigned char *addr, u16 vid);
 
 /* br_forward.c */
 enum br_pkt_type {
@@ -1076,6 +1085,11 @@ void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
                              struct sk_buff *skb);
 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
                                  const struct sk_buff *skb);
+int br_switchdev_set_port_flag(struct net_bridge_port *p,
+                              unsigned long flags,
+                              unsigned long mask);
+void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
+                            int type);
 #else
 static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
 {
@@ -1092,6 +1106,18 @@ static inline bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
 {
        return true;
 }
+
+static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
+                                            unsigned long flags,
+                                            unsigned long mask)
+{
+       return 0;
+}
+
+static inline void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+}
 #endif /* CONFIG_NET_SWITCHDEV */
 
 #endif
index f4097b900de1ff51a9d29c967c370c916c6092f5..181a44d0f1da6364a8965b54cf13aa6a5e44ef22 100644 (file)
@@ -55,3 +55,79 @@ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
        return !skb->offload_fwd_mark ||
               BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
 }
+
+/* Flags that can be offloaded to hardware */
+#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
+                                 BR_MCAST_FLOOD | BR_BCAST_FLOOD)
+
+int br_switchdev_set_port_flag(struct net_bridge_port *p,
+                              unsigned long flags,
+                              unsigned long mask)
+{
+       struct switchdev_attr attr = {
+               .orig_dev = p->dev,
+               .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
+       };
+       int err;
+
+       if (mask & ~BR_PORT_FLAGS_HW_OFFLOAD)
+               return 0;
+
+       err = switchdev_port_attr_get(p->dev, &attr);
+       if (err == -EOPNOTSUPP)
+               return 0;
+       if (err)
+               return err;
+
+       /* Check if specific bridge flag attribute offload is supported */
+       if (!(attr.u.brport_flags_support & mask)) {
+               br_warn(p->br, "bridge flag offload is not supported %u(%s)\n",
+                       (unsigned int)p->port_no, p->dev->name);
+               return -EOPNOTSUPP;
+       }
+
+       attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
+       attr.flags = SWITCHDEV_F_DEFER;
+       attr.u.brport_flags = flags;
+       err = switchdev_port_attr_set(p->dev, &attr);
+       if (err) {
+               br_warn(p->br, "error setting offload flag on port %u(%s)\n",
+                       (unsigned int)p->port_no, p->dev->name);
+               return err;
+       }
+
+       return 0;
+}
+
+static void
+br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
+                               u16 vid, struct net_device *dev)
+{
+       struct switchdev_notifier_fdb_info info;
+       unsigned long notifier_type;
+
+       info.addr = mac;
+       info.vid = vid;
+       notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
+       call_switchdev_notifiers(notifier_type, dev, &info.info);
+}
+
+void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+       if (!fdb->added_by_user)
+               return;
+
+       switch (type) {
+       case RTM_DELNEIGH:
+               br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
+                                               fdb->vlan_id,
+                                               fdb->dst->dev);
+               break;
+       case RTM_NEWNEIGH:
+               br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
+                                               fdb->vlan_id,
+                                               fdb->dst->dev);
+               break;
+       }
+}
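
br_switchdev_fdb_notify() above covers the bridge-to-driver direction: entries added by the user are announced as SWITCHDEV_FDB_ADD_TO_DEVICE / SWITCHDEV_FDB_DEL_TO_DEVICE. A rough sketch of what a switch driver does with them, reporting SWITCHDEV_FDB_OFFLOADED back once the hardware has the entry (hw_fdb_add/hw_fdb_del are hypothetical; real drivers defer the programming to a work item because the notifier may run in atomic context):

#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>

/* Hypothetical hardware hooks provided elsewhere by the driver. */
int hw_fdb_add(struct net_device *dev, const unsigned char *addr, u16 vid);
int hw_fdb_del(struct net_device *dev, const unsigned char *addr, u16 vid);

static int my_switchdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (hw_fdb_add(dev, fdb_info->addr, fdb_info->vid))
			break;
		/* Let the bridge mark the entry NTF_OFFLOADED. */
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		hw_fdb_del(dev, fdb_info->addr, fdb_info->vid);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_switchdev_nb = {
	.notifier_call = my_switchdev_event,
};
/* registered at probe time with register_switchdev_notifier(&my_switchdev_nb) */
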
index 946f758d44f2f122d34aa33caec0a92f682a1802..a65a3b25e104221d49c6e26ac24a4d1843af2fe4 100644 (file)
@@ -1874,6 +1874,24 @@ static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
        .arg1_type      = ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
+{
+       /* Set user specified hash as L4(+), so that it gets returned
+        * on skb_get_hash() call unless BPF prog later on triggers a
+        * skb_clear_hash().
+        */
+       __skb_set_sw_hash(skb, hash, true);
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_set_hash_proto = {
+       .func           = bpf_set_hash,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
           u16, vlan_tci)
 {
@@ -2744,6 +2762,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_get_hash_recalc_proto;
        case BPF_FUNC_set_hash_invalid:
                return &bpf_set_hash_invalid_proto;
+       case BPF_FUNC_set_hash:
+               return &bpf_set_hash_proto;
        case BPF_FUNC_perf_event_output:
                return &bpf_skb_event_output_proto;
        case BPF_FUNC_get_smp_processor_id:
@@ -2774,12 +2794,6 @@ xdp_func_proto(enum bpf_func_id func_id)
        }
 }
 
-static const struct bpf_func_proto *
-cg_skb_func_proto(enum bpf_func_id func_id)
-{
-       return sk_filter_func_proto(func_id);
-}
-
 static const struct bpf_func_proto *
 lwt_inout_func_proto(enum bpf_func_id func_id)
 {
@@ -3344,7 +3358,7 @@ const struct bpf_verifier_ops xdp_prog_ops = {
 };
 
 const struct bpf_verifier_ops cg_skb_prog_ops = {
-       .get_func_proto         = cg_skb_func_proto,
+       .get_func_proto         = sk_filter_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
        .convert_ctx_access     = bpf_convert_ctx_access,
        .test_run               = bpf_prog_test_run_skb,
index 26bbfababff27cecc589bca69e3eb14739187f5b..2178db8e47cd332a3ca0de850cd574a5acaadb10 100644 (file)
@@ -596,6 +596,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
+       struct nlattr *nla;
        struct net *peer;
        int nsid, err;
 
@@ -603,23 +604,35 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
-       if (!tb[NETNSA_NSID])
+       if (!tb[NETNSA_NSID]) {
+               NL_SET_ERR_MSG(extack, "nsid is missing");
                return -EINVAL;
+       }
        nsid = nla_get_s32(tb[NETNSA_NSID]);
 
-       if (tb[NETNSA_PID])
+       if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
-       else if (tb[NETNSA_FD])
+               nla = tb[NETNSA_PID];
+       } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
-       else
+               nla = tb[NETNSA_FD];
+       } else {
+               NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
-       if (IS_ERR(peer))
+       }
+       if (IS_ERR(peer)) {
+               NL_SET_BAD_ATTR(extack, nla);
+               NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
+       }
 
        spin_lock_bh(&net->nsid_lock);
        if (__peernet2id(net, peer) >= 0) {
                spin_unlock_bh(&net->nsid_lock);
                err = -EEXIST;
+               NL_SET_BAD_ATTR(extack, nla);
+               NL_SET_ERR_MSG(extack,
+                              "Peer netns already has a nsid assigned");
                goto out;
        }
 
@@ -628,6 +641,10 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
+       } else if (err == -ENOSPC && nsid >= 0) {
+               err = -EEXIST;
+               NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
+               NL_SET_ERR_MSG(extack, "The specified nsid is already used");
        }
 out:
        put_net(peer);
@@ -670,6 +687,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
+       struct nlattr *nla;
        struct sk_buff *msg;
        struct net *peer;
        int err, id;
@@ -678,15 +696,22 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
-       if (tb[NETNSA_PID])
+       if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
-       else if (tb[NETNSA_FD])
+               nla = tb[NETNSA_PID];
+       } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
-       else
+               nla = tb[NETNSA_FD];
+       } else {
+               NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
+       }
 
-       if (IS_ERR(peer))
+       if (IS_ERR(peer)) {
+               NL_SET_BAD_ATTR(extack, nla);
+               NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
+       }
 
        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
index ae35cce3a40d70387bee815798933aa43a0e6d84..7232274de334bbd0852b80fc286ee316e22946d7 100644 (file)
@@ -51,7 +51,8 @@ static u32 seq_scale(u32 seq)
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
-u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+u32 secure_tcpv6_ts_off(const struct net *net,
+                       const __be32 *saddr, const __be32 *daddr)
 {
        const struct {
                struct in6_addr saddr;
@@ -61,7 +62,7 @@ u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
                .daddr = *(struct in6_addr *)daddr,
        };
 
-       if (sysctl_tcp_timestamps != 1)
+       if (net->ipv4.sysctl_tcp_timestamps != 1)
                return 0;
 
        ts_secret_init();
@@ -113,9 +114,9 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
-u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
 {
-       if (sysctl_tcp_timestamps != 1)
+       if (net->ipv4.sysctl_tcp_timestamps != 1)
                return 0;
 
        ts_secret_init();
index 82cfc9c7a090060036f40f75954651c241db9dc1..e508c1eae67fb433ae165875876450fd4307a7b2 100644 (file)
@@ -2646,7 +2646,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
 {
        int pos = skb_headlen(skb);
 
-       skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+       skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
+                                     SKBTX_SHARED_FRAG;
        if (len < pos)  /* Split line is inside header. */
                skb_split_inside_header(skb, skb1, len, pos);
        else            /* Second chunk has no header, nothing to copy. */
@@ -3261,8 +3262,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                skb_copy_from_linear_data_offset(head_skb, offset,
                                                 skb_put(nskb, hsize), hsize);
 
-               skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
-                       SKBTX_SHARED_FRAG;
+               skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
+                                             SKBTX_SHARED_FRAG;
 
                while (pos < offset + len) {
                        if (i >= nfrags) {
@@ -3948,7 +3949,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
                return;
 
        if (tsonly) {
-               skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+               skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
+                                            SKBTX_ANY_TSTAMP;
                skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
        }
 
index bef844127e0182091678b9d57f7ec85c5241748d..ad8a4bc841267a442a1da3c56ef1cf074f9825b9 100644 (file)
@@ -2076,6 +2076,26 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 }
 EXPORT_SYMBOL(sock_cmsg_send);
 
+static void sk_enter_memory_pressure(struct sock *sk)
+{
+       if (!sk->sk_prot->enter_memory_pressure)
+               return;
+
+       sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static void sk_leave_memory_pressure(struct sock *sk)
+{
+       if (sk->sk_prot->leave_memory_pressure) {
+               sk->sk_prot->leave_memory_pressure(sk);
+       } else {
+               unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+
+               if (memory_pressure && *memory_pressure)
+                       *memory_pressure = 0;
+       }
+}
+
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
 
index 405483a07efc7ac2efcfe86e285a7673547c9691..73a0399dc7a277178b0a432a067172131dce99ee 100644 (file)
@@ -447,7 +447,7 @@ static void dn_destruct(struct sock *sk)
        dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
 }
 
-static int dn_memory_pressure;
+static unsigned long dn_memory_pressure;
 
 static void dn_enter_memory_pressure(struct sock *sk)
 {
index 1cfdb31a2f442121d2cbff3243780e2d899da0ce..5f3caee725eec423307a336c65c9280d247b9986 100644 (file)
@@ -836,10 +836,13 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
 }
 
 static int dsa_slave_setup_tc(struct net_device *dev, u32 handle,
-                             __be16 protocol, struct tc_to_netdev *tc)
+                             u32 chain_index, __be16 protocol,
+                             struct tc_to_netdev *tc)
 {
        bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
-       int ret = -EOPNOTSUPP;
+
+       if (chain_index)
+               return -EOPNOTSUPP;
 
        switch (tc->type) {
        case TC_SETUP_MATCHALL:
@@ -853,10 +856,8 @@ static int dsa_slave_setup_tc(struct net_device *dev, u32 handle,
                        return 0;
                }
        default:
-               break;
+               return -EOPNOTSUPP;
        }
-
-       return ret;
 }
 
 void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
index d8e5c311ee7cda455c961f09b542e301decadc94..f1029a8d0e20f2263f3a55230c7710d20ac15fbf 100644 (file)
@@ -159,19 +159,30 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
 {
        const struct switchdev_obj_port_vlan *vlan = info->vlan;
        struct switchdev_trans *trans = info->trans;
+       DECLARE_BITMAP(members, ds->num_ports);
+       int port, err;
 
-       /* Do not care yet about other switch chips of the fabric */
-       if (ds->index != info->sw_index)
-               return 0;
+       /* Build a mask of VLAN members */
+       bitmap_zero(members, ds->num_ports);
+       if (ds->index == info->sw_index)
+               set_bit(info->port, members);
+       for (port = 0; port < ds->num_ports; port++)
+               if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+                       set_bit(port, members);
 
        if (switchdev_trans_ph_prepare(trans)) {
                if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
                        return -EOPNOTSUPP;
 
-               return ds->ops->port_vlan_prepare(ds, info->port, vlan, trans);
+               for_each_set_bit(port, members, ds->num_ports) {
+                       err = ds->ops->port_vlan_prepare(ds, port, vlan, trans);
+                       if (err)
+                               return err;
+               }
        }
 
-       ds->ops->port_vlan_add(ds, info->port, vlan, trans);
+       for_each_set_bit(port, members, ds->num_ports)
+               ds->ops->port_vlan_add(ds, port, vlan, trans);
 
        return 0;
 }
@@ -181,14 +192,13 @@ static int dsa_switch_vlan_del(struct dsa_switch *ds,
 {
        const struct switchdev_obj_port_vlan *vlan = info->vlan;
 
-       /* Do not care yet about other switch chips of the fabric */
-       if (ds->index != info->sw_index)
-               return 0;
-
        if (!ds->ops->port_vlan_del)
                return -EOPNOTSUPP;
 
-       return ds->ops->port_vlan_del(ds, info->port, vlan);
+       if (ds->index == info->sw_index)
+               return ds->ops->port_vlan_del(ds, info->port, vlan);
+
+       return 0;
 }
 
 static int dsa_switch_event(struct notifier_block *nb,
index df14815a3b8ce74aeb613458ffaf5f6eee4a263d..a7dd088d5fc96da259813cae48c374527c9e251e 100644 (file)
@@ -176,6 +176,7 @@ EXPORT_SYMBOL(__ip_dev_find);
 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 
 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
+static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                         int destroy);
 #ifdef CONFIG_SYSCTL
@@ -441,6 +442,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 {
        struct in_device *in_dev = ifa->ifa_dev;
        struct in_ifaddr *ifa1, **ifap, **last_primary;
+       struct in_validator_info ivi;
+       int ret;
 
        ASSERT_RTNL();
 
@@ -471,6 +474,23 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
                }
        }
 
+       /* Allow any devices that wish to register ifaddr validators to weigh
+        * in now, before changes are committed.  The rtnl lock is serializing
+        * access here, so the state should not change between a validator call
+        * and a final notify on commit.  This isn't invoked on promotion under
+        * the assumption that validators are checking the address itself, and
+        * not the flags.
+        */
+       ivi.ivi_addr = ifa->ifa_address;
+       ivi.ivi_dev = ifa->ifa_dev;
+       ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
+                                          NETDEV_UP, &ivi);
+       ret = notifier_to_errno(ret);
+       if (ret) {
+               inet_free_ifa(ifa);
+               return ret;
+       }
+
        if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
                prandom_seed((__force u32) ifa->ifa_local);
                ifap = last_primary;
@@ -1356,6 +1376,19 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_inetaddr_notifier);
 
+int register_inetaddr_validator_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
+}
+EXPORT_SYMBOL(register_inetaddr_validator_notifier);
+
+int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
+           nb);
+}
+EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
+
 /* Rename ifa_labels for a device name change. Make some effort to preserve
  * existing alias numbering and to create unique labels if possible.
 */
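
The validator chain gives other code a veto over an IPv4 address before __inet_insert_ifa() commits it (the IPv6 counterpart added to addrconf.h earlier in this diff works the same way). A minimal sketch of a module using it, assuming struct in_validator_info and the register/unregister helpers are exposed via linux/inetdevice.h as the code above implies; the 192.0.2.0/24 policy is made up for illustration:

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/notifier.h>

static int my_addr_validate(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = ptr;

	/* Made-up policy: refuse any address in TEST-NET-1. */
	if (event == NETDEV_UP &&
	    (ntohl(ivi->ivi_addr) & 0xffffff00) == 0xc0000200)
		return notifier_from_errno(-EADDRNOTAVAIL);

	return NOTIFY_DONE;
}

static struct notifier_block my_addr_validator_nb = {
	.notifier_call = my_addr_validate,
};

static int __init my_validator_init(void)
{
	return register_inetaddr_validator_notifier(&my_addr_validator_nb);
}

static void __exit my_validator_exit(void)
{
	unregister_inetaddr_validator_notifier(&my_addr_validator_nb);
}

module_init(my_validator_init);
module_exit(my_validator_exit);
MODULE_LICENSE("GPL");
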
index 551de4d023a8edbf74835b43cb32d9173eedae36..9374b99c7c172435ebe0c442be244b489395241d 100644 (file)
@@ -2528,6 +2528,129 @@ static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
                return ipmr_mfc_delete(tbl, &mfcc, parent);
 }
 
+static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
+{
+       u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
+
+       if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
+           nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
+           nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
+                       mrt->mroute_reg_vif_num) ||
+           nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
+                      mrt->mroute_do_assert) ||
+           nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
+               return false;
+
+       return true;
+}
+
+static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
+{
+       struct nlattr *vif_nest;
+       struct vif_device *vif;
+
+       /* if the VIF doesn't exist just continue */
+       if (!VIF_EXISTS(mrt, vifid))
+               return true;
+
+       vif = &mrt->vif_table[vifid];
+       vif_nest = nla_nest_start(skb, IPMRA_VIF);
+       if (!vif_nest)
+               return false;
+       if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
+           nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
+           nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
+           nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
+                             IPMRA_VIFA_PAD) ||
+           nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
+                             IPMRA_VIFA_PAD) ||
+           nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
+                             IPMRA_VIFA_PAD) ||
+           nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
+                             IPMRA_VIFA_PAD) ||
+           nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
+           nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
+               nla_nest_cancel(skb, vif_nest);
+               return false;
+       }
+       nla_nest_end(skb, vif_nest);
+
+       return true;
+}
+
+static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nlmsghdr *nlh = NULL;
+       unsigned int t = 0, s_t;
+       unsigned int e = 0, s_e;
+       struct mr_table *mrt;
+
+       s_t = cb->args[0];
+       s_e = cb->args[1];
+
+       ipmr_for_each_table(mrt, net) {
+               struct nlattr *vifs, *af;
+               struct ifinfomsg *hdr;
+               u32 i;
+
+               if (t < s_t)
+                       goto skip_table;
+               nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+                               cb->nlh->nlmsg_seq, RTM_NEWLINK,
+                               sizeof(*hdr), NLM_F_MULTI);
+               if (!nlh)
+                       break;
+
+               hdr = nlmsg_data(nlh);
+               memset(hdr, 0, sizeof(*hdr));
+               hdr->ifi_family = RTNL_FAMILY_IPMR;
+
+               af = nla_nest_start(skb, IFLA_AF_SPEC);
+               if (!af) {
+                       nlmsg_cancel(skb, nlh);
+                       goto out;
+               }
+
+               if (!ipmr_fill_table(mrt, skb)) {
+                       nlmsg_cancel(skb, nlh);
+                       goto out;
+               }
+
+               vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
+               if (!vifs) {
+                       nla_nest_end(skb, af);
+                       nlmsg_end(skb, nlh);
+                       goto out;
+               }
+               for (i = 0; i < mrt->maxvif; i++) {
+                       if (e < s_e)
+                               goto skip_entry;
+                       if (!ipmr_fill_vif(mrt, i, skb)) {
+                               nla_nest_end(skb, vifs);
+                               nla_nest_end(skb, af);
+                               nlmsg_end(skb, nlh);
+                               goto out;
+                       }
+skip_entry:
+                       e++;
+               }
+               s_e = 0;
+               e = 0;
+               nla_nest_end(skb, vifs);
+               nla_nest_end(skb, af);
+               nlmsg_end(skb, nlh);
+skip_table:
+               t++;
+       }
+
+out:
+       cb->args[1] = e;
+       cb->args[0] = t;
+
+       return skb->len;
+}
+
 #ifdef CONFIG_PROC_FS
 /* The /proc interfaces to multicast routing :
  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
@@ -2870,6 +2993,9 @@ int __init ip_mr_init(void)
                      ipmr_rtm_route, NULL, NULL);
        rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
                      ipmr_rtm_route, NULL, NULL);
+
+       rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
+                     NULL, ipmr_rtm_dumplink, NULL);
        return 0;
 
 #ifdef CONFIG_IP_PIMSM_V2
index fa44e752a9a3f8eb9957314149ae15e6df10465a..43eb6567b3a0a2add9a1d36019eae5b6d5caf657 100644 (file)
@@ -250,6 +250,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
        SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
        SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
+       SNMP_MIB_ITEM("TCPMemoryPressuresChrono", LINUX_MIB_TCPMEMORYPRESSURESCHRONO),
        SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD),
        SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD),
        SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO),
index 6426250a58ea1afb29b673c00bb9d58bd3d21122..7835bb4a1fab2b335c65001cc3c9233ffb4fd5cc 100644 (file)
@@ -232,7 +232,8 @@ EXPORT_SYMBOL(tcp_get_cookie_sock);
  * return false if we decode a tcp option that is disabled
  * on the host.
  */
-bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
+bool cookie_timestamp_decode(const struct net *net,
+                            struct tcp_options_received *tcp_opt)
 {
        /* echoed timestamp, lowest bits contain options */
        u32 options = tcp_opt->rcv_tsecr;
@@ -242,12 +243,12 @@ bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
                return true;
        }
 
-       if (!sysctl_tcp_timestamps)
+       if (!net->ipv4.sysctl_tcp_timestamps)
                return false;
 
        tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
 
-       if (tcp_opt->sack_ok && !sysctl_tcp_sack)
+       if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
                return false;
 
        if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
@@ -256,7 +257,7 @@ bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
        tcp_opt->wscale_ok = 1;
        tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
 
-       return sysctl_tcp_window_scaling != 0;
+       return net->ipv4.sysctl_tcp_window_scaling != 0;
 }
 EXPORT_SYMBOL(cookie_timestamp_decode);
 
@@ -312,14 +313,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
-       tcp_parse_options(skb, &tcp_opt, 0, NULL);
+       tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
 
        if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
-               tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
+               tsoff = secure_tcp_ts_off(sock_net(sk),
+                                         ip_hdr(skb)->daddr,
+                                         ip_hdr(skb)->saddr);
                tcp_opt.rcv_tsecr -= tsoff;
        }
 
-       if (!cookie_timestamp_decode(&tcp_opt))
+       if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
                goto out;
 
        ret = NULL;
index 86957e9cd6c6748ac00aa0307154bb131c43f1da..7065234a89a50ae977f2de1c2ec7ca867ac2e6e2 100644 (file)
@@ -364,27 +364,6 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
 }
 
 static struct ctl_table ipv4_table[] = {
-       {
-               .procname       = "tcp_timestamps",
-               .data           = &sysctl_tcp_timestamps,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-       {
-               .procname       = "tcp_window_scaling",
-               .data           = &sysctl_tcp_window_scaling,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-       {
-               .procname       = "tcp_sack",
-               .data           = &sysctl_tcp_sack,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_retrans_collapse",
                .data           = &sysctl_tcp_retrans_collapse,
@@ -1116,6 +1095,27 @@ static struct ctl_table ipv4_net_table[] = {
                .extra2         = &one,
        },
 #endif
+       {
+               .procname       = "tcp_sack",
+               .data           = &init_net.ipv4.sysctl_tcp_sack,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_window_scaling",
+               .data           = &init_net.ipv4.sysctl_tcp_window_scaling,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_timestamps",
+               .data           = &init_net.ipv4.sysctl_tcp_timestamps,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
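
With tcp_sack, tcp_window_scaling and tcp_timestamps moved into ipv4_net_table, each network namespace gets its own copy of the three knobs (the per-netns defaults are set up outside the hunks shown here). A quick userspace check that the value can be toggled in a fresh namespace without touching the init namespace (needs CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;

	/* A brand-new network namespace... */
	if (unshare(CLONE_NEWNET)) {
		perror("unshare");
		return 1;
	}
	/* ...gets its own copy of the knob: /proc/sys/net is resolved
	 * against the opener's netns, so init_net keeps its value. */
	fd = open("/proc/sys/net/ipv4/tcp_sack", O_WRONLY);
	if (fd < 0 || write(fd, "0\n", 2) != 2) {
		perror("tcp_sack");
		return 1;
	}
	close(fd);
	puts("SACK disabled in this namespace only");
	return 0;
}
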
 
index 87981fcdfcf20c6846ea3474dce1e640aea6e092..cc8fd8b747a47e9b66492ecdf27256ef6d879877 100644 (file)
@@ -320,17 +320,36 @@ struct tcp_splice_state {
  * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
-int tcp_memory_pressure __read_mostly;
-EXPORT_SYMBOL(tcp_memory_pressure);
+unsigned long tcp_memory_pressure __read_mostly;
+EXPORT_SYMBOL_GPL(tcp_memory_pressure);
 
 void tcp_enter_memory_pressure(struct sock *sk)
 {
-       if (!tcp_memory_pressure) {
+       unsigned long val;
+
+       if (tcp_memory_pressure)
+               return;
+       val = jiffies;
+
+       if (!val)
+               val--;
+       if (!cmpxchg(&tcp_memory_pressure, 0, val))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
-               tcp_memory_pressure = 1;
-       }
 }
-EXPORT_SYMBOL(tcp_enter_memory_pressure);
+EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
+
+void tcp_leave_memory_pressure(struct sock *sk)
+{
+       unsigned long val;
+
+       if (!tcp_memory_pressure)
+               return;
+       val = xchg(&tcp_memory_pressure, 0);
+       if (val)
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
+                             jiffies_to_msecs(jiffies - val));
+}
+EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
 
 /* Convert seconds to retransmits based on initial and max timeout */
 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
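
The tcp.c hunk above turns tcp_memory_pressure from a 0/1 flag into the jiffies value at which pressure began (0 meaning "no pressure"), which is what lets tcp_leave_memory_pressure() feed the elapsed time into the new TCPMemoryPressuresChrono counter. A minimal sketch of that timestamp-as-flag pattern, with hypothetical names and the same cmpxchg/xchg discipline (kernel context assumed):

#include <linux/jiffies.h>
#include <linux/atomic.h>

static unsigned long pressure_since;            /* 0 == not under pressure */

static void sketch_enter_pressure(void)
{
        unsigned long val = jiffies;

        if (!val)                               /* never store 0, it means "clear" */
                val--;
        cmpxchg(&pressure_since, 0, val);       /* only the first entrant records the start */
}

static unsigned int sketch_leave_pressure(void)
{
        unsigned long val = xchg(&pressure_since, 0);

        return val ? jiffies_to_msecs(jiffies - val) : 0;       /* msecs spent under pressure */
}
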
index 4ea8ec5c7bb410834d1c54e0159467ae08d4cd15..2ab7e2fa9bb9727a6d22552f851d6254ea074481 100644 (file)
@@ -76,9 +76,6 @@
 #include <asm/unaligned.h>
 #include <linux/errqueue.h>
 
-int sysctl_tcp_timestamps __read_mostly = 1;
-int sysctl_tcp_window_scaling __read_mostly = 1;
-int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly;
 int sysctl_tcp_max_reordering __read_mostly = 300;
 int sysctl_tcp_dsack __read_mostly = 1;
@@ -3724,7 +3721,8 @@ static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
  */
-void tcp_parse_options(const struct sk_buff *skb,
+void tcp_parse_options(const struct net *net,
+                      const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx, int estab,
                       struct tcp_fastopen_cookie *foc)
 {
@@ -3765,7 +3763,7 @@ void tcp_parse_options(const struct sk_buff *skb,
                                break;
                        case TCPOPT_WINDOW:
                                if (opsize == TCPOLEN_WINDOW && th->syn &&
-                                   !estab && sysctl_tcp_window_scaling) {
+                                   !estab && net->ipv4.sysctl_tcp_window_scaling) {
                                        __u8 snd_wscale = *(__u8 *)ptr;
                                        opt_rx->wscale_ok = 1;
                                        if (snd_wscale > TCP_MAX_WSCALE) {
@@ -3781,7 +3779,7 @@ void tcp_parse_options(const struct sk_buff *skb,
                        case TCPOPT_TIMESTAMP:
                                if ((opsize == TCPOLEN_TIMESTAMP) &&
                                    ((estab && opt_rx->tstamp_ok) ||
-                                    (!estab && sysctl_tcp_timestamps))) {
+                                    (!estab && net->ipv4.sysctl_tcp_timestamps))) {
                                        opt_rx->saw_tstamp = 1;
                                        opt_rx->rcv_tsval = get_unaligned_be32(ptr);
                                        opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3789,7 +3787,7 @@ void tcp_parse_options(const struct sk_buff *skb,
                                break;
                        case TCPOPT_SACK_PERM:
                                if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-                                   !estab && sysctl_tcp_sack) {
+                                   !estab && net->ipv4.sysctl_tcp_sack) {
                                        opt_rx->sack_ok = TCP_SACK_SEEN;
                                        tcp_sack_reset(opt_rx);
                                }
@@ -3858,7 +3856,8 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static bool tcp_fast_parse_options(const struct sk_buff *skb,
+static bool tcp_fast_parse_options(const struct net *net,
+                                  const struct sk_buff *skb,
                                   const struct tcphdr *th, struct tcp_sock *tp)
 {
        /* In the spirit of fast parsing, compare doff directly to constant
@@ -3873,7 +3872,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
                        return true;
        }
 
-       tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
+       tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
        if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
                tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
@@ -5234,7 +5233,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
        bool rst_seq_match = false;
 
        /* RFC1323: H1. Apply PAWS check first. */
-       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+       if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
+           tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5605,7 +5605,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
                /* Get original SYNACK MSS value if user MSS sets mss_clamp */
                tcp_clear_options(&opt);
                opt.user_mss = opt.mss_clamp = 0;
-               tcp_parse_options(synack, &opt, 0, NULL);
+               tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
                mss = opt.mss_clamp;
        }
 
@@ -5659,7 +5659,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
        int saved_clamp = tp->rx_opt.mss_clamp;
        bool fastopen_fail;
 
-       tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
+       tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
        if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
                tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
@@ -6332,7 +6332,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = af_ops->mss_clamp;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+       tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
+                         want_cookie ? NULL : &foc);
 
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);
@@ -6350,7 +6351,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                goto drop_and_free;
 
        if (tmp_opt.tstamp_ok)
-               tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb);
+               tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
 
        if (!want_cookie && !isn) {
                /* Kill the following clause, if you dislike this way. */
index 191b2f78b19d2c8d62c59cc046bd608687679619..1dc8c449e16a5d5c0ed52708d1c88b669d55d6a8 100644 (file)
@@ -102,10 +102,9 @@ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
                              tcp_hdr(skb)->source);
 }
 
-static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
+static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
 {
-       return secure_tcp_ts_off(ip_hdr(skb)->daddr,
-                                ip_hdr(skb)->saddr);
+       return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -242,7 +241,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                       inet->inet_daddr,
                                                       inet->inet_sport,
                                                       usin->sin_port);
-               tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
+               tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
+                                                inet->inet_saddr,
                                                 inet->inet_daddr);
        }
 
@@ -2387,6 +2387,7 @@ struct proto tcp_prot = {
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
+       .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .orphan_count           = &tcp_orphan_count,
@@ -2465,6 +2466,9 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
        net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+       net->ipv4.sysctl_tcp_sack = 1;
+       net->ipv4.sysctl_tcp_window_scaling = 1;
+       net->ipv4.sysctl_tcp_timestamps = 1;
 
        return 0;
 fail:
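
With tcp_sack, tcp_window_scaling and tcp_timestamps now living in struct netns_ipv4 (table entries above, per-namespace defaults set in tcp_sk_init()), TCP code reaches them through the owning network namespace instead of through globals. A hedged sketch of the resulting access pattern; the helper name is hypothetical:

#include <net/sock.h>

/* Hypothetical helper: does this socket's namespace advertise SACK on SYNs? */
static inline bool sketch_tcp_sack_enabled(const struct sock *sk)
{
        return sock_net(sk)->ipv4.sysctl_tcp_sack != 0;
}
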
index d0642df7304452b57d2bc7f92a0a0c6d821553d3..d30ee31e94ebd4d76a0f2bd910f213bb994770f3 100644 (file)
@@ -98,7 +98,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-               tcp_parse_options(skb, &tmp_opt, 0, NULL);
+               tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
 
                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
@@ -559,7 +559,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
-               tcp_parse_options(skb, &tmp_opt, 0, NULL);
+               tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
 
                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
index e3aab1c1cf78d7b5696fc1438fe54f8ce448c09e..9a9c395b62352513a42b3353434be8134ac9cdd7 100644 (file)
@@ -569,18 +569,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        opts->mss = tcp_advertise_mss(sk);
        remaining -= TCPOLEN_MSS_ALIGNED;
 
-       if (likely(sysctl_tcp_timestamps && !*md5)) {
+       if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
-       if (likely(sysctl_tcp_window_scaling)) {
+       if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
                opts->ws = tp->rx_opt.rcv_wscale;
                opts->options |= OPTION_WSCALE;
                remaining -= TCPOLEN_WSCALE_ALIGNED;
        }
-       if (likely(sysctl_tcp_sack)) {
+       if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!(OPTION_TS & opts->options)))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -3271,8 +3271,9 @@ static void tcp_connect_init(struct sock *sk)
        /* We'll fix this up when we get a response from the other end.
         * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
         */
-       tp->tcp_header_len = sizeof(struct tcphdr) +
-               (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
+       tp->tcp_header_len = sizeof(struct tcphdr);
+       if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
+               tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
 
 #ifdef CONFIG_TCP_MD5SIG
        if (tp->af_specific->md5_lookup(sk, sk))
@@ -3303,7 +3304,7 @@ static void tcp_connect_init(struct sock *sk)
                                  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
                                  &tp->rcv_wnd,
                                  &tp->window_clamp,
-                                 sysctl_tcp_window_scaling,
+                                 sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
                                  &rcv_wscale,
                                  dst_metric(dst, RTAX_INITRWND));
 
index 25443fd946a80f16e627e8ad60f466e60a6ccadf..0aa36b09301305a41daea121b52f0e050d8e9cd5 100644 (file)
@@ -963,6 +963,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
        struct net *net = dev_net(idev->dev);
        struct inet6_ifaddr *ifa = NULL;
        struct rt6_info *rt;
+       struct in6_validator_info i6vi;
        unsigned int hash;
        int err = 0;
        int addr_type = ipv6_addr_type(addr);
@@ -974,6 +975,9 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
                return ERR_PTR(-EADDRNOTAVAIL);
 
        rcu_read_lock_bh();
+
+       in6_dev_hold(idev);
+
        if (idev->dead) {
                err = -ENODEV;                  /*XXX*/
                goto out2;
@@ -984,6 +988,17 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
                goto out2;
        }
 
+       i6vi.i6vi_addr = *addr;
+       i6vi.i6vi_dev = idev;
+       rcu_read_unlock_bh();
+
+       err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
+
+       rcu_read_lock_bh();
+       err = notifier_to_errno(err);
+       if (err)
+               goto out2;
+
        spin_lock(&addrconf_hash_lock);
 
        /* Ignore adding duplicate addresses on an interface */
@@ -1034,7 +1049,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
        ifa->rt = rt;
 
        ifa->idev = idev;
-       in6_dev_hold(idev);
        /* For caller */
        in6_ifa_hold(ifa);
 
@@ -1062,6 +1076,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
                inet6addr_notifier_call_chain(NETDEV_UP, ifa);
        else {
                kfree(ifa);
+               in6_dev_put(idev);
                ifa = ERR_PTR(err);
        }
 
index bfa941fc1165002903b5a0364e5584b075469e2e..9e3488d50b157eb8ee7f9be44370176c157a1c62 100644 (file)
@@ -88,6 +88,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
 EXPORT_SYMBOL(__ipv6_addr_type);
 
 static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
+static ATOMIC_NOTIFIER_HEAD(inet6addr_validator_chain);
 
 int register_inet6addr_notifier(struct notifier_block *nb)
 {
@@ -107,6 +108,24 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
 }
 EXPORT_SYMBOL(inet6addr_notifier_call_chain);
 
+int register_inet6addr_validator_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&inet6addr_validator_chain, nb);
+}
+EXPORT_SYMBOL(register_inet6addr_validator_notifier);
+
+int unregister_inet6addr_validator_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&inet6addr_validator_chain, nb);
+}
+EXPORT_SYMBOL(unregister_inet6addr_validator_notifier);
+
+int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
+{
+       return atomic_notifier_call_chain(&inet6addr_validator_chain, val, v);
+}
+EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
+
 static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
                                        struct dst_entry **u2,
                                        struct flowi6 *u3)
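
The new inet6addr validator chain gives interested code a chance to veto an IPv6 address before addrconf installs it (ipv6_add_addr() above calls the chain with NETDEV_UP and honours the returned error). A hedged consumer sketch, assuming struct in6_validator_info is declared in <net/addrconf.h> by the header part of this change (not shown here) and using a made-up policy:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#include <net/ipv6.h>

static int sketch_inet6addr_validate(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
{
        struct in6_validator_info *i6vi = ptr;

        /* Hypothetical policy: refuse link-local addresses at validation time. */
        if (event == NETDEV_UP &&
            (ipv6_addr_type(&i6vi->i6vi_addr) & IPV6_ADDR_LINKLOCAL))
                return notifier_from_errno(-EADDRNOTAVAIL);

        return NOTIFY_DONE;
}

static struct notifier_block sketch_inet6addr_validator_nb = {
        .notifier_call = sketch_inet6addr_validate,
};

/* registered with register_inet6addr_validator_notifier(&sketch_inet6addr_validator_nb) */
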
index bf8a58a1c32d83a9605844075da5815be23a6bf1..0d6f3b6345de26c329ae1d6f25dde652a5452d4b 100644 (file)
@@ -67,9 +67,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
        struct in6_addr *nexthop;
        int ret;
 
-       skb->protocol = htons(ETH_P_IPV6);
-       skb->dev = dev;
-
        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
@@ -154,6 +151,9 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
+       skb->protocol = htons(ETH_P_IPV6);
+       skb->dev = dev;
+
        if (unlikely(idev->cnf.disable_ipv6)) {
                IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
@@ -869,7 +869,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
        if (skb->sk && dst_allfrag(skb_dst(skb)))
                sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 
-       skb->dev = skb_dst(skb)->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
        err = -EMSGSIZE;
 
index 971823359f5b98da46c39b86c9ddcefd14df8559..2f7e99af67dbfd2324d39086881b9475045d7e1f 100644 (file)
@@ -162,15 +162,16 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
-       tcp_parse_options(skb, &tcp_opt, 0, NULL);
+       tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
 
        if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
-               tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+               tsoff = secure_tcpv6_ts_off(sock_net(sk),
+                                           ipv6_hdr(skb)->daddr.s6_addr32,
                                            ipv6_hdr(skb)->saddr.s6_addr32);
                tcp_opt.rcv_tsecr -= tsoff;
        }
 
-       if (!cookie_timestamp_decode(&tcp_opt))
+       if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
                goto out;
 
        ret = NULL;
index 233edfabe1dbceaeb6cdd42a2bb379072aeee361..84ad50218255dd64109c9cf834a8cff46a8ae45f 100644 (file)
@@ -109,9 +109,9 @@ static u32 tcp_v6_init_seq(const struct sk_buff *skb)
                                tcp_hdr(skb)->source);
 }
 
-static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
+static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
 {
-       return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+       return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
                                   ipv6_hdr(skb)->saddr.s6_addr32);
 }
 
@@ -292,7 +292,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                                                         sk->sk_v6_daddr.s6_addr32,
                                                         inet->inet_sport,
                                                         inet->inet_dport);
-               tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
+               tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
+                                                  np->saddr.s6_addr32,
                                                   sk->sk_v6_daddr.s6_addr32);
        }
 
@@ -1248,9 +1249,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);
 
-       if (tcp_filter(sk, skb))
-               goto discard;
-
        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
@@ -1909,6 +1907,7 @@ struct proto tcpv6_prot = {
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
+       .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
index 82ca49fba33606d019f009b9f97c9370b2a0136a..f9349a495caf0aed6a1c5106d32551dd8e11ee8a 100644 (file)
@@ -188,7 +188,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 #define BLOCK_PRIV(x)          ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 
 struct packet_sock;
-static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev);
 
index a9a8c7d5a4a983b9be12fe85a7f71f3e6a825f19..c7a5d861906bb63d17ee4e1c8b9327f43e7dbede 100644 (file)
@@ -111,6 +111,9 @@ struct qrtr_node {
        struct list_head item;
 };
 
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb);
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb);
+
 /* Release node resources and free the node.
  *
  * Do not call directly, use qrtr_node_release.  To be used with
@@ -245,14 +248,11 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 }
 EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
 
-/* Allocate and construct a resume-tx packet. */
-static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
-                                           u32 dst_node, u32 port)
+static struct sk_buff *qrtr_alloc_ctrl_packet(u32 type, size_t pkt_len,
+                                             u32 src_node, u32 dst_node)
 {
-       const int pkt_len = 20;
        struct qrtr_hdr *hdr;
        struct sk_buff *skb;
-       __le32 *buf;
 
        skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
        if (!skb)
@@ -261,7 +261,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
 
        hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
        hdr->version = cpu_to_le32(QRTR_PROTO_VER);
-       hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+       hdr->type = cpu_to_le32(type);
        hdr->src_node_id = cpu_to_le32(src_node);
        hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
        hdr->confirm_rx = cpu_to_le32(0);
@@ -269,6 +269,22 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        hdr->dst_node_id = cpu_to_le32(dst_node);
        hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
+       return skb;
+}
+
+/* Allocate and construct a resume-tx packet. */
+static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+                                           u32 dst_node, u32 port)
+{
+       const int pkt_len = 20;
+       struct sk_buff *skb;
+       __le32 *buf;
+
+       skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_RESUME_TX, pkt_len,
+                                    src_node, dst_node);
+       if (!skb)
+               return NULL;
+
        buf = (__le32 *)skb_put(skb, pkt_len);
        memset(buf, 0, pkt_len);
        buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
@@ -278,6 +294,45 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        return skb;
 }
 
+/* Allocate and construct a BYE message to signal remote termination */
+static struct sk_buff *qrtr_alloc_local_bye(u32 src_node)
+{
+       const int pkt_len = 20;
+       struct sk_buff *skb;
+       __le32 *buf;
+
+       skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_BYE, pkt_len,
+                                    src_node, qrtr_local_nid);
+       if (!skb)
+               return NULL;
+
+       buf = (__le32 *)skb_put(skb, pkt_len);
+       memset(buf, 0, pkt_len);
+       buf[0] = cpu_to_le32(QRTR_TYPE_BYE);
+
+       return skb;
+}
+
+static struct sk_buff *qrtr_alloc_del_client(struct sockaddr_qrtr *sq)
+{
+       const int pkt_len = 20;
+       struct sk_buff *skb;
+       __le32 *buf;
+
+       skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_DEL_CLIENT, pkt_len,
+                                    sq->sq_node, QRTR_NODE_BCAST);
+       if (!skb)
+               return NULL;
+
+       buf = (__le32 *)skb_put(skb, pkt_len);
+       memset(buf, 0, pkt_len);
+       buf[0] = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
+       buf[1] = cpu_to_le32(sq->sq_node);
+       buf[2] = cpu_to_le32(sq->sq_port);
+
+       return skb;
+}
+
 static struct qrtr_sock *qrtr_port_lookup(int port);
 static void qrtr_port_put(struct qrtr_sock *ipc);
 
@@ -369,11 +424,17 @@ EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
 void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
 {
        struct qrtr_node *node = ep->node;
+       struct sk_buff *skb;
 
        mutex_lock(&node->ep_lock);
        node->ep = NULL;
        mutex_unlock(&node->ep_lock);
 
+       /* Notify the local controller about the event */
+       skb = qrtr_alloc_local_bye(node->nid);
+       if (skb)
+               qrtr_local_enqueue(NULL, skb);
+
        qrtr_node_release(node);
        ep->node = NULL;
 }
@@ -408,8 +469,15 @@ static void qrtr_port_put(struct qrtr_sock *ipc)
 /* Remove port assignment. */
 static void qrtr_port_remove(struct qrtr_sock *ipc)
 {
+       struct sk_buff *skb;
        int port = ipc->us.sq_port;
 
+       skb = qrtr_alloc_del_client(&ipc->us);
+       if (skb) {
+               skb_set_owner_w(skb, &ipc->sk);
+               qrtr_bcast_enqueue(NULL, skb);
+       }
+
        if (port == QRTR_PORT_CTRL)
                port = 0;
 
@@ -462,6 +530,26 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
        return 0;
 }
 
+/* Reset all non-control ports */
+static void qrtr_reset_ports(void)
+{
+       struct qrtr_sock *ipc;
+       int id;
+
+       mutex_lock(&qrtr_port_lock);
+       idr_for_each_entry(&qrtr_ports, ipc, id) {
+               /* Don't reset control port */
+               if (id == 0)
+                       continue;
+
+               sock_hold(&ipc->sk);
+               ipc->sk.sk_err = ENETRESET;
+               wake_up_interruptible(sk_sleep(&ipc->sk));
+               sock_put(&ipc->sk);
+       }
+       mutex_unlock(&qrtr_port_lock);
+}
+
 /* Bind socket to address.
  *
  * Socket should be locked upon call.
@@ -490,6 +578,10 @@ static int __qrtr_bind(struct socket *sock,
 
        sock_reset_flag(sk, SOCK_ZAPPED);
 
+       /* Notify all open ports about the new controller */
+       if (port == QRTR_PORT_CTRL)
+               qrtr_reset_ports();
+
        return 0;
 }
 
index 0c4dc4a7832c9fd7c1a45fd9a4d06860101c2205..58ae0db52ea13380582804cfa5431f25141cf7ef 100644 (file)
@@ -262,6 +262,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
  * @srx: The address of the peer to contact
  * @key: The security context to use (defaults to socket setting)
  * @user_call_ID: The ID to use
+ * @tx_total_len: Total length of data to transmit during the call (or -1)
  * @gfp: The allocation constraints
  * @notify_rx: Where to send notifications instead of socket queue
  *
@@ -276,6 +277,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           struct sockaddr_rxrpc *srx,
                                           struct key *key,
                                           unsigned long user_call_ID,
+                                          s64 tx_total_len,
                                           gfp_t gfp,
                                           rxrpc_notify_rx_t notify_rx)
 {
@@ -303,7 +305,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        cp.security_level       = 0;
        cp.exclusive            = false;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
+       call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
+                                    gfp);
        /* The socket has been unlocked. */
        if (!IS_ERR(call))
                call->notify_rx = notify_rx;
@@ -581,6 +584,34 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
        return ret;
 }
 
+/*
+ * Get socket options.
+ */
+static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
+                           char __user *optval, int __user *_optlen)
+{
+       int optlen;
+
+       if (level != SOL_RXRPC)
+               return -EOPNOTSUPP;
+
+       if (get_user(optlen, _optlen))
+               return -EFAULT;
+
+       switch (optname) {
+       case RXRPC_SUPPORTED_CMSG:
+               if (optlen < sizeof(int))
+                       return -ETOOSMALL;
+               if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
+                   put_user(sizeof(int), _optlen))
+                       return -EFAULT;
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 /*
  * permit an RxRPC socket to be polled
  */
@@ -784,7 +815,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
        .listen         = rxrpc_listen,
        .shutdown       = rxrpc_shutdown,
        .setsockopt     = rxrpc_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
+       .getsockopt     = rxrpc_getsockopt,
        .sendmsg        = rxrpc_sendmsg,
        .recvmsg        = rxrpc_recvmsg,
        .mmap           = sock_no_mmap,
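
The new rxrpc_getsockopt() above exposes RXRPC_SUPPORTED_CMSG, so userspace can ask the running kernel which sendmsg()/recvmsg() control-message types it understands before relying on newer ones such as RXRPC_TX_LENGTH. A hedged userspace sketch, assuming SOL_RXRPC and RXRPC_SUPPORTED_CMSG are visible from the rxrpc headers in use:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>        /* assumed to define SOL_RXRPC, RXRPC_SUPPORTED_CMSG */

static int query_supported_cmsg(int fd)
{
        int highest = 0;
        socklen_t len = sizeof(highest);

        if (getsockopt(fd, SOL_RXRPC, RXRPC_SUPPORTED_CMSG, &highest, &len) < 0) {
                perror("getsockopt(RXRPC_SUPPORTED_CMSG)");
                return -1;
        }

        /* the kernel reports RXRPC__SUPPORTED - 1, i.e. the highest known cmsg type */
        printf("highest supported rxrpc cmsg type: %d\n", highest);
        return highest;
}
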
index e9b536cb0acf8534371a52214e8ec32390d0254e..adbf37946450c54d8d2aef9f05b9be3c09b4706f 100644 (file)
@@ -528,6 +528,7 @@ struct rxrpc_call {
        struct rb_node          sock_node;      /* Node in rx->calls */
        struct sk_buff          *tx_pending;    /* Tx socket buffer being filled */
        wait_queue_head_t       waitq;          /* Wait queue for channel or Tx */
+       s64                     tx_total_len;   /* Total length left to be transmitted (or -1) */
        __be32                  crypto_buf[2];  /* Temporary packet crypto buffer */
        unsigned long           user_call_ID;   /* user-defined call ID */
        unsigned long           flags;
@@ -683,7 +684,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct rxrpc_conn_parameters *,
                                         struct sockaddr_rxrpc *,
-                                        unsigned long, gfp_t);
+                                        unsigned long, s64, gfp_t);
 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
                         struct sk_buff *);
 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
index 692110808baaa02db31ed1cf366ae39b2cadaff0..423030fd93bed29d1b2aac0e0f0c232ed42049bb 100644 (file)
@@ -127,6 +127,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+       call->tx_total_len = -1;
 
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
@@ -201,6 +202,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         unsigned long user_call_ID,
+                                        s64 tx_total_len,
                                         gfp_t gfp)
        __releases(&rx->sk.sk_lock.slock)
 {
@@ -219,6 +221,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                return call;
        }
 
+       call->tx_total_len = tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
                         here, (const void *)user_call_ID);
 
index 5a4801e7f560d8a9a8ef06d93814f86f469c6dc7..2e636a525a651102da021b926a00e990e859bc04 100644 (file)
@@ -28,6 +28,15 @@ enum rxrpc_command {
        RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
 };
 
+struct rxrpc_send_params {
+       s64                     tx_total_len;   /* Total Tx data length (if send data) */
+       unsigned long           user_call_ID;   /* User's call ID */
+       u32                     abort_code;     /* Abort code to Tx (if abort) */
+       enum rxrpc_command      command : 8;    /* The command to implement */
+       bool                    exclusive;      /* Shared or exclusive call */
+       bool                    upgrade;        /* If the connection is upgradeable */
+};
+
 /*
  * wait for space to appear in the transmit/ACK window
  * - caller holds the socket locked
@@ -199,6 +208,13 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
        more = msg->msg_flags & MSG_MORE;
 
+       if (call->tx_total_len != -1) {
+               if (len > call->tx_total_len)
+                       return -EMSGSIZE;
+               if (!more && len != call->tx_total_len)
+                       return -EMSGSIZE;
+       }
+
        skb = call->tx_pending;
        call->tx_pending = NULL;
        rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
@@ -291,6 +307,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        sp->remain -= copy;
                        skb->mark += copy;
                        copied += copy;
+                       if (call->tx_total_len != -1)
+                               call->tx_total_len -= copy;
                }
 
                /* check for the far side aborting the call or a network error
@@ -362,19 +380,12 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 /*
  * extract control messages from the sendmsg() control buffer
  */
-static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
-                             unsigned long *user_call_ID,
-                             enum rxrpc_command *command,
-                             u32 *abort_code,
-                             bool *_exclusive,
-                             bool *_upgrade)
+static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
 {
        struct cmsghdr *cmsg;
        bool got_user_ID = false;
        int len;
 
-       *command = RXRPC_CMD_SEND_DATA;
-
        if (msg->msg_controllen == 0)
                return -EINVAL;
 
@@ -394,49 +405,55 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
                        if (msg->msg_flags & MSG_CMSG_COMPAT) {
                                if (len != sizeof(u32))
                                        return -EINVAL;
-                               *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
+                               p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
                        } else {
                                if (len != sizeof(unsigned long))
                                        return -EINVAL;
-                               *user_call_ID = *(unsigned long *)
+                               p->user_call_ID = *(unsigned long *)
                                        CMSG_DATA(cmsg);
                        }
-                       _debug("User Call ID %lx", *user_call_ID);
                        got_user_ID = true;
                        break;
 
                case RXRPC_ABORT:
-                       if (*command != RXRPC_CMD_SEND_DATA)
+                       if (p->command != RXRPC_CMD_SEND_DATA)
                                return -EINVAL;
-                       *command = RXRPC_CMD_SEND_ABORT;
-                       if (len != sizeof(*abort_code))
+                       p->command = RXRPC_CMD_SEND_ABORT;
+                       if (len != sizeof(p->abort_code))
                                return -EINVAL;
-                       *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
-                       _debug("Abort %x", *abort_code);
-                       if (*abort_code == 0)
+                       p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
+                       if (p->abort_code == 0)
                                return -EINVAL;
                        break;
 
                case RXRPC_ACCEPT:
-                       if (*command != RXRPC_CMD_SEND_DATA)
+                       if (p->command != RXRPC_CMD_SEND_DATA)
                                return -EINVAL;
-                       *command = RXRPC_CMD_ACCEPT;
+                       p->command = RXRPC_CMD_ACCEPT;
                        if (len != 0)
                                return -EINVAL;
                        break;
 
                case RXRPC_EXCLUSIVE_CALL:
-                       *_exclusive = true;
+                       p->exclusive = true;
                        if (len != 0)
                                return -EINVAL;
                        break;
 
                case RXRPC_UPGRADE_SERVICE:
-                       *_upgrade = true;
+                       p->upgrade = true;
                        if (len != 0)
                                return -EINVAL;
                        break;
 
+               case RXRPC_TX_LENGTH:
+                       if (p->tx_total_len != -1 || len != sizeof(__s64))
+                               return -EINVAL;
+                       p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+                       if (p->tx_total_len < 0)
+                               return -EINVAL;
+                       break;
+
                default:
                        return -EINVAL;
                }
@@ -444,6 +461,8 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
 
        if (!got_user_ID)
                return -EINVAL;
+       if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+               return -EINVAL;
        _leave(" = 0");
        return 0;
 }
@@ -455,8 +474,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
  */
 static struct rxrpc_call *
 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
-                                 unsigned long user_call_ID, bool exclusive,
-                                 bool upgrade)
+                                 struct rxrpc_send_params *p)
        __releases(&rx->sk.sk_lock.slock)
 {
        struct rxrpc_conn_parameters cp;
@@ -480,10 +498,11 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
        cp.local                = rx->local;
        cp.key                  = rx->key;
        cp.security_level       = rx->min_sec_level;
-       cp.exclusive            = rx->exclusive | exclusive;
-       cp.upgrade              = upgrade;
+       cp.exclusive            = rx->exclusive | p->exclusive;
+       cp.upgrade              = p->upgrade;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+       call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
+                                    p->tx_total_len, GFP_KERNEL);
        /* The socket is now unlocked */
 
        _leave(" = %p\n", call);
@@ -499,26 +518,29 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        __releases(&rx->sk.sk_lock.slock)
 {
        enum rxrpc_call_state state;
-       enum rxrpc_command cmd;
        struct rxrpc_call *call;
-       unsigned long user_call_ID = 0;
-       bool exclusive = false;
-       bool upgrade = true;
-       u32 abort_code = 0;
        int ret;
 
+       struct rxrpc_send_params p = {
+               .tx_total_len   = -1,
+               .user_call_ID   = 0,
+               .abort_code     = 0,
+               .command        = RXRPC_CMD_SEND_DATA,
+               .exclusive      = false,
+               .upgrade        = true,
+       };
+
        _enter("");
 
-       ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
-                                &exclusive, &upgrade);
+       ret = rxrpc_sendmsg_cmsg(msg, &p);
        if (ret < 0)
                goto error_release_sock;
 
-       if (cmd == RXRPC_CMD_ACCEPT) {
+       if (p.command == RXRPC_CMD_ACCEPT) {
                ret = -EINVAL;
                if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
                        goto error_release_sock;
-               call = rxrpc_accept_call(rx, user_call_ID, NULL);
+               call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
                /* The socket is now unlocked. */
                if (IS_ERR(call))
                        return PTR_ERR(call);
@@ -526,13 +548,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                return 0;
        }
 
-       call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
+       call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
        if (!call) {
                ret = -EBADSLT;
-               if (cmd != RXRPC_CMD_SEND_DATA)
+               if (p.command != RXRPC_CMD_SEND_DATA)
                        goto error_release_sock;
-               call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
-                                                        exclusive, upgrade);
+               call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
                /* The socket is now unlocked... */
                if (IS_ERR(call))
                        return PTR_ERR(call);
@@ -556,6 +577,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        ret = -ERESTARTSYS;
                        goto error_put;
                }
+
+               if (p.tx_total_len != -1) {
+                       ret = -EINVAL;
+                       if (call->tx_total_len != -1 ||
+                           call->tx_pending ||
+                           call->tx_top != 0)
+                               goto error_put;
+                       call->tx_total_len = p.tx_total_len;
+               }
        }
 
        state = READ_ONCE(call->state);
@@ -565,11 +595,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        if (state >= RXRPC_CALL_COMPLETE) {
                /* it's too late for this call */
                ret = -ESHUTDOWN;
-       } else if (cmd == RXRPC_CMD_SEND_ABORT) {
+       } else if (p.command == RXRPC_CMD_SEND_ABORT) {
                ret = 0;
-               if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED))
+               if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
                        ret = rxrpc_send_abort_packet(call);
-       } else if (cmd != RXRPC_CMD_SEND_DATA) {
+       } else if (p.command != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
        } else if (rxrpc_is_client_call(call) &&
                   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
@@ -673,5 +703,24 @@ bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
        mutex_unlock(&call->user_mutex);
        return aborted;
 }
-
 EXPORT_SYMBOL(rxrpc_kernel_abort_call);
+
+/**
+ * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
+ * @sock: The socket the call is on
+ * @call: The call to be informed
+ * @tx_total_len: The amount of data to be transmitted for this call
+ *
+ * Allow a kernel service to set the total transmit length on a call.  This
+ * allows buffer-to-packet encrypt-and-copy to be performed.
+ *
+ * This function is primarily for use in setting the reply length, since the
+ * request length can be set when beginning the call.
+ */
+void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
+                               s64 tx_total_len)
+{
+       WARN_ON(call->tx_total_len != -1);
+       call->tx_total_len = tx_total_len;
+}
+EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
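
Userspace can supply the same total-length hint with the new RXRPC_TX_LENGTH control message on the first sendmsg() of a call. A hedged sketch of building that cmsg alongside RXRPC_USER_CALL_ID, assuming the constants come from the rxrpc headers in use and that the call's destination is already established on the socket:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>        /* assumed: SOL_RXRPC, RXRPC_USER_CALL_ID, RXRPC_TX_LENGTH */

static ssize_t send_first_chunk(int fd, unsigned long call_id,
                                const void *buf, size_t chunk, int64_t total)
{
        char control[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(sizeof(total))];
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = chunk };
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = control,
                .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg;

        memset(control, 0, sizeof(control));

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_RXRPC;
        cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
        memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_RXRPC;
        cmsg->cmsg_type  = RXRPC_TX_LENGTH;     /* total Tx data length for the whole call */
        cmsg->cmsg_len   = CMSG_LEN(sizeof(total));
        memcpy(CMSG_DATA(cmsg), &total, sizeof(total));

        /* subsequent chunks must add up to exactly 'total'; the last one omits MSG_MORE */
        return sendmsg(fd, &msg, (int64_t)chunk < total ? MSG_MORE : 0);
}
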
index a9c56ad4533afb0789f32ca327ecd275bae3c1bb..be0cfdf489762cfbc28894214b1188ba3c1c5fda 100644 (file)
@@ -162,6 +162,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        bpf_offload.gen_flags = prog->gen_flags;
 
        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->chain->index,
                                            tp->protocol, &offload);
 
        if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
index 33feaee197cfd5b28e4fde72557d19d93a45d9b6..7832eb93379b3907c2b42d9cd304cfa998237396 100644 (file)
@@ -239,7 +239,8 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
        tc->type = TC_SETUP_CLSFLOWER;
        tc->cls_flower = &offload;
 
-       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
+       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index,
+                                     tp->protocol, tc);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -275,8 +276,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
        tc->type = TC_SETUP_CLSFLOWER;
        tc->cls_flower = &offload;
 
-       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
-                                           tc);
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->chain->index, tp->protocol, tc);
        if (!err)
                f->flags |= TCA_CLS_FLAGS_IN_HW;
 
@@ -302,7 +303,8 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        tc->type = TC_SETUP_CLSFLOWER;
        tc->cls_flower = &offload;
 
-       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
+       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                     tp->chain->index, tp->protocol, tc);
 }
 
 static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
index 51859b8edd7eff3845ca3d5b5b0d900583736d4a..9dc26c32cf321e475a8f783cf423ff67562479fa 100644 (file)
@@ -64,8 +64,9 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
-       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
-                                           &offload);
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->chain->index,
+                                           tp->protocol, &offload);
        if (!err)
                head->flags |= TCA_CLS_FLAGS_IN_HW;
 
@@ -86,8 +87,8 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
        offload.cls_mall->exts = NULL;
        offload.cls_mall->cookie = cookie;
 
-       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
-                                            &offload);
+       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index,
+                                     tp->protocol, &offload);
 }
 
 static void mall_destroy(struct tcf_proto *tp)
index d20e72a095d578e65eda0dafc62217146aeea1c1..2d01195153e6c80178328c65d3b94651f3e2094f 100644 (file)
@@ -441,7 +441,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
                offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
                offload.cls_u32->knode.handle = handle;
                dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                             tp->protocol, &offload);
+                                             tp->chain->index, tp->protocol,
+                                             &offload);
        }
 }
 
@@ -465,7 +466,8 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
        offload.cls_u32->hnode.prio = h->prio;
 
        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                           tp->protocol, &offload);
+                                           tp->chain->index, tp->protocol,
+                                           &offload);
        if (tc_skip_sw(flags))
                return err;
 
@@ -488,7 +490,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
                offload.cls_u32->hnode.prio = h->prio;
 
                dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                             tp->protocol, &offload);
+                                             tp->chain->index, tp->protocol,
+                                             &offload);
        }
 }
 
@@ -522,7 +525,8 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                offload.cls_u32->knode.link_handle = n->ht_down->handle;
 
        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                           tp->protocol, &offload);
+                                           tp->chain->index, tp->protocol,
+                                           &offload);
 
        if (!err)
                n->flags |= TCA_CLS_FLAGS_IN_HW;
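
All of the classifier call sites above (and the mqprio ones just below, which pass 0) now hand the classifier chain index to ndo_setup_tc(), so drivers can tell which chain a rule belongs to. A hedged sketch of the driver side under the extended signature, for a hypothetical driver that only offloads the default chain:

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int sketch_ndo_setup_tc(struct net_device *dev, u32 handle,
                               u32 chain_index, __be16 protocol,
                               struct tc_to_netdev *tc)
{
        if (chain_index)                /* only chain 0 is offloaded in this sketch */
                return -EOPNOTSUPP;

        switch (tc->type) {
        case TC_SETUP_CLSFLOWER:
                /* program tc->cls_flower into hardware here */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
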
index 0a4cf27ea54bd78768d4fa084f7b082460f5f266..e0c02725cd487c2a2c5f063066f29faab8ae479d 100644 (file)
@@ -43,7 +43,7 @@ static void mqprio_destroy(struct Qdisc *sch)
                struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
                                           { .mqprio = &offload } };
 
-               dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
+               dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, 0, &tc);
        } else {
                netdev_set_num_tc(dev, 0);
        }
@@ -152,7 +152,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
                                           { .mqprio = &offload } };
 
-               err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
+               err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle,
+                                                   0, 0, &tc);
                if (err)
                        return err;
 
index 288c5e0cda5d1f89fffe40279ae8a1610acd56b8..72b07dd9b9595cb76326fb60919f462f3cecbd3a 100644 (file)
@@ -1181,12 +1181,8 @@ void sctp_assoc_update(struct sctp_association *asoc,
                if (sctp_state(asoc, COOKIE_WAIT))
                        sctp_stream_update(&asoc->stream, &new->stream);
 
-               if (!asoc->assoc_id) {
-                       /* get a new association id since we don't have one
-                        * yet.
-                        */
-                       sctp_assoc_set_id(asoc, GFP_ATOMIC);
-               }
+               /* get a new assoc id if we don't have one yet. */
+               sctp_assoc_set_id(asoc, GFP_ATOMIC);
        }
 
        /* SCTP-AUTH: Save the peer parameters from the new associations
index 5a27d0f03df53be07ec50a245f0ce4a35c32ef76..8e34db56bc1db2a118722b5d88ab5e3f757f2fca 100644 (file)
@@ -218,8 +218,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
                return -ENOMEM;
 
        head = &sctp_ep_hashtable[hash];
-       local_bh_disable();
-       read_lock(&head->lock);
+       read_lock_bh(&head->lock);
        sctp_for_each_hentry(epb, &head->chain) {
                ep = sctp_ep(epb);
                sk = epb->sk;
@@ -234,8 +233,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
                sctp_seq_dump_local_addrs(seq, epb);
                seq_printf(seq, "\n");
        }
-       read_unlock(&head->lock);
-       local_bh_enable();
+       read_unlock_bh(&head->lock);
 
        return 0;
 }
index 0822046e4f3f5a1acd3f5382d915bf9004a25c1c..32d5495e793cdcc942f59c8b3e3279752d24765b 100644 (file)
@@ -103,7 +103,7 @@ static int sctp_autobind(struct sock *sk);
 static void sctp_sock_migrate(struct sock *, struct sock *,
                              struct sctp_association *, sctp_socket_type_t);
 
-static int sctp_memory_pressure;
+static unsigned long sctp_memory_pressure;
 static atomic_long_t sctp_memory_allocated;
 struct percpu_counter sctp_sockets_allocated;
 
@@ -1494,7 +1494,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
        pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        sk->sk_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SCTP_SS_CLOSING;
 
@@ -1544,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
         * held and that should be grabbed before socket lock.
         */
        spin_lock_bh(&net->sctp.addr_wq_lock);
-       bh_lock_sock(sk);
+       bh_lock_sock_nested(sk);
 
        /* Hold the sock, since sk_common_release() will put sock_put()
         * and we have just a little more cleanup.
index 8d40a7d31c9908c22b757fc0ab92bce436211c90..25dc67ef9d37084f55ff7a49a2daabee6556a42c 100644 (file)
@@ -571,24 +571,17 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
 
-static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
+static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
 
 /**
  *     register_switchdev_notifier - Register notifier
  *     @nb: notifier_block
  *
- *     Register switch device notifier. This should be used by code
- *     which needs to monitor events happening in particular device.
- *     Return values are same as for atomic_notifier_chain_register().
+ *     Register switch device notifier.
  */
 int register_switchdev_notifier(struct notifier_block *nb)
 {
-       int err;
-
-       rtnl_lock();
-       err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
-       rtnl_unlock();
-       return err;
+       return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
 
@@ -597,16 +590,10 @@ EXPORT_SYMBOL_GPL(register_switchdev_notifier);
  *     @nb: notifier_block
  *
  *     Unregister switch device notifier.
- *     Return values are same as for atomic_notifier_chain_unregister().
  */
 int unregister_switchdev_notifier(struct notifier_block *nb)
 {
-       int err;
-
-       rtnl_lock();
-       err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
-       rtnl_unlock();
-       return err;
+       return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
 }
 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 
@@ -616,18 +603,13 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
  *     @dev: port device
  *     @info: notifier information data
  *
- *     Call all network notifier blocks. This should be called by driver
- *     when it needs to propagate hardware event.
- *     Return values are same as for atomic_notifier_call_chain().
- *     rtnl_lock must be held.
+ *     Call all network notifier blocks.
  */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
 {
-       ASSERT_RTNL();
-
        info->dev = dev;
-       return raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+       return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
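
Because the switchdev chain is now an atomic notifier chain, callers of call_switchdev_notifiers() no longer need rtnl_lock(), but registered callbacks may be invoked in atomic context and must not sleep. A hedged consumer sketch that defers the real work to process context:

#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

static void sketch_switchdev_work(struct work_struct *work)
{
        /* sleepable processing of the (copied) event data would go here */
        kfree(work);
}

static int sketch_switchdev_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
{
        struct work_struct *work;

        /* atomic context: allocate with GFP_ATOMIC, copy what is needed, never sleep */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_BAD;

        INIT_WORK(work, sketch_switchdev_work);
        schedule_work(work);
        return NOTIFY_DONE;
}

static struct notifier_block sketch_switchdev_nb = {
        .notifier_call = sketch_switchdev_event,
};

/* registered with register_switchdev_notifier(&sketch_switchdev_nb) */
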
 
index 9b2c10b45733e4dc66d601ac2ea52fed65f3752d..f94b48b168dcc82cbd67b916951a4699f4f675ef 100644 (file)
@@ -513,6 +513,11 @@ union bpf_attr {
  *     Get the owner uid of the socket stored inside sk_buff.
  *     @skb: pointer to skb
  *     Return: uid of the socket owner on success or overflowuid if failed.
+ *
+ * u32 bpf_set_hash(skb, hash)
+ *     Set full skb->hash.
+ *     @skb: pointer to skb
+ *     @hash: hash to set
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -562,7 +567,8 @@ union bpf_attr {
        FN(xdp_adjust_head),            \
        FN(probe_read_str),             \
        FN(get_socket_cookie),          \
-       FN(get_socket_uid),
+       FN(get_socket_uid),             \
+       FN(set_hash),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
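
The new bpf_set_hash() helper documented above lets a program overwrite skb->hash outright (bpf_set_hash_invalid() could previously only clear it). A hedged sketch of a tc classifier using it, with the helper stub written in the samples/bpf style and a made-up policy:

#include <linux/bpf.h>

/* helper stub; BPF_FUNC_set_hash is introduced by the uapi hunk above */
static __u32 (*bpf_set_hash)(void *ctx, __u32 hash) =
        (void *) BPF_FUNC_set_hash;

__attribute__((section("classifier"), used))
int sketch_set_fixed_hash(struct __sk_buff *skb)
{
        /* hypothetical policy: pin all packets seen on ifindex 2 to one flow hash */
        if (skb->ifindex == 2)
                bpf_set_hash(skb, 0x12345678);

        return 0;       /* TC_ACT_OK */
}
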
index 9644d4e069dec3e43ec654f6f44d933a02a7878a..1426594fdf6bcfc1384f62f140ccc497b104077d 100644 (file)
@@ -9,6 +9,8 @@
 #include <stddef.h>
 #include <stdbool.h>
 
+#include <sys/resource.h>
+
 #include <linux/unistd.h>
 #include <linux/filter.h>
 #include <linux/bpf_perf_event.h>
@@ -432,6 +434,9 @@ static int do_test(unsigned int from, unsigned int to)
 int main(int argc, char **argv)
 {
        unsigned int from = 0, to = ARRAY_SIZE(tests);
+       struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+       setrlimit(RLIMIT_MEMLOCK, &rinf);
 
        if (argc == 3) {
                unsigned int l = atoi(argv[argc - 2]);
index 93314524de0d033ded2a00d71bbf500a0afbd60e..79601c81e169c22650ae545b6a5fefb8323655ee 100644 (file)
@@ -239,6 +239,54 @@ static void test_hashmap_percpu(int task, void *data)
        close(fd);
 }
 
+static void test_hashmap_walk(int task, void *data)
+{
+       int fd, i, max_entries = 100000;
+       long long key, value, next_key;
+       bool next_key_valid = true;
+
+       fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+                           max_entries, map_flags);
+       if (fd < 0) {
+               printf("Failed to create hashmap '%s'!\n", strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < max_entries; i++) {
+               key = i; value = key;
+               assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
+       }
+
+       for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
+                                        &next_key) == 0; i++) {
+               key = next_key;
+               assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+       }
+
+       assert(i == max_entries);
+
+       assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
+       for (i = 0; next_key_valid; i++) {
+               next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
+               assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+               value++;
+               assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
+               key = next_key;
+       }
+
+       assert(i == max_entries);
+
+       for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
+                                        &next_key) == 0; i++) {
+               key = next_key;
+               assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+               assert(value - 1 == key);
+       }
+
+       assert(i == max_entries);
+       close(fd);
+}
+
 static void test_arraymap(int task, void *data)
 {
        int key, next_key, fd;
@@ -464,6 +512,7 @@ static void test_map_stress(void)
        run_parallel(100, test_hashmap, NULL);
        run_parallel(100, test_hashmap_percpu, NULL);
        run_parallel(100, test_hashmap_sizes, NULL);
+       run_parallel(100, test_hashmap_walk, NULL);
 
        run_parallel(100, test_arraymap, NULL);
        run_parallel(100, test_arraymap_percpu, NULL);
@@ -549,6 +598,7 @@ static void run_all_tests(void)
 {
        test_hashmap(0, NULL);
        test_hashmap_percpu(0, NULL);
+       test_hashmap_walk(0, NULL);
 
        test_arraymap(0, NULL);
        test_arraymap_percpu(0, NULL);
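The new test_hashmap_walk() case exercises the bpf_map_get_next_key() iteration pattern. As a rough, stand-alone sketch of that pattern (the function name and printout are illustrative, not taken from the patch), walking every element of a hash map from user space looks like this:

#include <assert.h>
#include <stdio.h>
#include <bpf/bpf.h>    /* libbpf syscall wrappers; include path may vary by tree */

static void walk_map(int fd)
{
        long long key, next_key, value;
        int err;

        /* NULL as the "previous" key returns the first key in the map. */
        err = bpf_map_get_next_key(fd, NULL, &next_key);
        while (!err) {
                key = next_key;
                assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
                printf("key=%lld value=%lld\n", key, value);
                /* Feed the last key back in; an error (ENOENT) ends the walk. */
                err = bpf_map_get_next_key(fd, &key, &next_key);
        }
}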
index d8723aaf827a3eb4162457d87fabdedeb9aa9344..880d2963b4725192b6243200a9d81e0fe56329ab 100644
@@ -23,8 +23,8 @@ struct bpf_map_def SEC("maps") test_map_id = {
        .max_entries = 1,
 };
 
-SEC("test_prog_id")
-int test_prog_id(struct __sk_buff *skb)
+SEC("test_obj_id_dummy")
+int test_obj_id(struct __sk_buff *skb)
 {
        __u32 key = 0;
        __u64 *value;
index 8189bfc7e2773869f794408aa41be086b19aa6e6..fec13ab84fca43dba7a616e676c29563be7c3892 100644
@@ -23,7 +23,7 @@ typedef __u16 __sum16;
 #include <sys/wait.h>
 #include <sys/resource.h>
 #include <sys/types.h>
-#include <pwd.h>
+#include <fcntl.h>
 
 #include <linux/bpf.h>
 #include <linux/err.h>
@@ -297,6 +297,7 @@ static void test_bpf_obj_id(void)
        const __u32 array_key = 0;
        const int nr_iters = 2;
        const char *file = "./test_obj_id.o";
+       const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
 
        struct bpf_object *objs[nr_iters];
        int prog_fds[nr_iters], map_fds[nr_iters];
@@ -305,9 +306,18 @@ static void test_bpf_obj_id(void)
        struct bpf_map_info map_infos[nr_iters + 1];
        char jited_insns[128], xlated_insns[128];
        __u32 i, next_id, info_len, nr_id_found, duration = 0;
-       int err = 0;
+       int sysctl_fd, jit_enabled = 0, err = 0;
        __u64 array_value;
 
+       sysctl_fd = open(jit_sysctl, O_RDONLY);
+       if (sysctl_fd != -1) {
+               char tmpc;
+
+               if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
+                       jit_enabled = (tmpc != '0');
+               close(sysctl_fd);
+       }
+
        err = bpf_prog_get_fd_by_id(0);
        CHECK(err >= 0 || errno != ENOENT,
              "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
@@ -339,13 +349,14 @@ static void test_bpf_obj_id(void)
                if (CHECK(err ||
                          prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
                          info_len != sizeof(struct bpf_prog_info) ||
-                         !prog_infos[i].jited_prog_len ||
+                         (jit_enabled && !prog_infos[i].jited_prog_len) ||
                          !prog_infos[i].xlated_prog_len,
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jited_prog_len %u xlated_prog_len %u\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
+                         jit_enabled,
                          prog_infos[i].jited_prog_len,
                          prog_infos[i].xlated_prog_len))
                        goto done;