From: Linus Torvalds
Date: Wed, 8 May 2013 22:29:48 +0000 (-0700)
Subject: Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
X-Git-Tag: v3.10-rc1~43
X-Git-Url: https://asedeno.scripts.mit.edu/gitweb/?a=commitdiff_plain;h=e0fd9affeb64088eff407dfc98bbd3a5c17ea479;hp=-c;p=linux.git

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA changes from Roland Dreier:

 - XRC transport fixes
 - Fix DHCP on IPoIB
 - mlx4 preparations for flow steering
 - iSER fixes
 - miscellaneous other fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (23 commits)
  IB/iser: Add support for iser CM REQ additional info
  IB/iser: Return error to upper layers on EAGAIN registration failures
  IB/iser: Move informational messages from error to info level
  IB/iser: Add module version
  mlx4_core: Expose a few helpers to fill DMFS HW structures
  mlx4_core: Directly expose fields of DMFS HW rule control segment
  mlx4_core: Change a few DMFS field names to match firmware spec
  mlx4: Match DMFS promiscuous field names to firmware spec
  mlx4_core: Move DMFS HW structs to common header file
  IB/mlx4: Set link type for RAW PACKET QPs in the QP context
  IB/mlx4: Disable VLAN stripping for RAW PACKET QPs
  mlx4_core: Reduce warning message for SRQ_LIMIT event to debug level
  RDMA/iwcm: Don't touch cmid after dropping reference
  IB/qib: Correct qib_verbs_register_sysfs() error handling
  IB/ipath: Correct ipath_verbs_register_sysfs() error handling
  RDMA/cxgb4: Fix SQ allocation when on-chip SQ is disabled
  SRPT: Fix odd use of WARN_ON()
  IPoIB: Fix ipoib_hard_header() return value
  RDMA: Rename random32() to prandom_u32()
  RDMA/cxgb3: Fix uninitialized variable
  ...
---

e0fd9affeb64088eff407dfc98bbd3a5c17ea479
diff --combined drivers/infiniband/hw/cxgb4/qp.c
index 5b059e2d80cc,ed49ab345b6e..232040447e8a
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@@ -42,21 -42,10 +42,21 @@@ static int ocqp_support = 1
  module_param(ocqp_support, int, 0644);
  MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

 -int db_fc_threshold = 2000;
 +int db_fc_threshold = 1000;
  module_param(db_fc_threshold, int, 0644);
 -MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
 -		 "db flow control mode (default = 2000)");
 +MODULE_PARM_DESC(db_fc_threshold,
 +		 "QP count/threshold that triggers"
 +		 " automatic db flow control mode (default = 1000)");
 +
 +int db_coalescing_threshold;
 +module_param(db_coalescing_threshold, int, 0644);
 +MODULE_PARM_DESC(db_coalescing_threshold,
 +		 "QP count/threshold that triggers"
 +		 " disabling db coalescing (default = 0)");
 +
 +static int max_fr_immd = T4_MAX_FR_IMMD;
 +module_param(max_fr_immd, int, 0644);
 +MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

  static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
  {
@@@ -87,7 -76,7 +87,7 @@@ static void dealloc_sq(struct c4iw_rde

  static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
  {
- 	if (!ocqp_support || !t4_ocqp_supported())
+ 	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
  		return -ENOSYS;
  	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
  	if (!sq->dma_addr)
@@@ -111,6 -100,16 +111,16 @@@ static int alloc_host_sq(struct c4iw_rd
  	return 0;
  }

+ static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+ {
+ 	int ret = -ENOSYS;
+ 	if (user)
+ 		ret = alloc_oc_sq(rdev, sq);
+ 	if (ret)
+ 		ret = alloc_host_sq(rdev, sq);
+ 	return
ret; + } + static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx) { @@@ -140,7 -139,7 +150,7 @@@ static int create_qp(struct c4iw_rdev * int wr_len; struct c4iw_wr_wait wr_wait; struct sk_buff *skb; - int ret; + int ret = 0; int eqsize; wq->sq.qid = c4iw_get_qpid(rdev, uctx); @@@ -179,15 -178,9 +189,9 @@@ goto free_sw_rq; } - if (user) { - if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq)) - goto free_hwaddr; - } else { - ret = alloc_host_sq(rdev, &wq->sq); - if (ret) - goto free_hwaddr; - } - + ret = alloc_sq(rdev, &wq->sq, user); + if (ret) + goto free_hwaddr; memset(wq->sq.queue, 0, wq->sq.memsize); dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); @@@ -542,7 -535,7 +546,7 @@@ static int build_rdma_recv(struct c4iw_ } static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, - struct ib_send_wr *wr, u8 *len16) + struct ib_send_wr *wr, u8 *len16, u8 t5dev) { struct fw_ri_immd *imdp; @@@ -564,51 -557,28 +568,51 @@@ wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff); - WARN_ON(pbllen > T4_MAX_FR_IMMD); - imdp = (struct fw_ri_immd *)(&wqe->fr + 1); - imdp->op = FW_RI_DATA_IMMD; - imdp->r1 = 0; - imdp->r2 = 0; - imdp->immdlen = cpu_to_be32(pbllen); - p = (__be64 *)(imdp + 1); - rem = pbllen; - for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { - *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); - rem -= sizeof *p; - if (++p == (__be64 *)&sq->queue[sq->size]) - p = (__be64 *)sq->queue; - } - BUG_ON(rem < 0); - while (rem) { - *p = 0; - rem -= sizeof *p; - if (++p == (__be64 *)&sq->queue[sq->size]) - p = (__be64 *)sq->queue; + + if (t5dev && use_dsgl && (pbllen > max_fr_immd)) { + struct c4iw_fr_page_list *c4pl = + to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); + struct fw_ri_dsgl *sglp; + + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { + wr->wr.fast_reg.page_list->page_list[i] = (__force u64) + cpu_to_be64((u64) + wr->wr.fast_reg.page_list->page_list[i]); + } + + sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); + sglp->op = FW_RI_DATA_DSGL; + sglp->r1 = 0; + sglp->nsge = cpu_to_be16(1); + sglp->addr0 = cpu_to_be64(c4pl->dma_addr); + sglp->len0 = cpu_to_be32(pbllen); + + *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); + } else { + imdp = (struct fw_ri_immd *)(&wqe->fr + 1); + imdp->op = FW_RI_DATA_IMMD; + imdp->r1 = 0; + imdp->r2 = 0; + imdp->immdlen = cpu_to_be32(pbllen); + p = (__be64 *)(imdp + 1); + rem = pbllen; + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { + *p = cpu_to_be64( + (u64)wr->wr.fast_reg.page_list->page_list[i]); + rem -= sizeof(*p); + if (++p == (__be64 *)&sq->queue[sq->size]) + p = (__be64 *)sq->queue; + } + BUG_ON(rem < 0); + while (rem) { + *p = 0; + rem -= sizeof(*p); + if (++p == (__be64 *)&sq->queue[sq->size]) + p = (__be64 *)sq->queue; + } + *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) + + pbllen, 16); } - *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16); return 0; } @@@ -709,10 -679,7 +713,10 @@@ int c4iw_post_send(struct ib_qp *ibqp, case IB_WR_FAST_REG_MR: fw_opcode = FW_RI_FR_NSMR_WR; swsqe->opcode = FW_RI_FAST_REGISTER; - err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16); + err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, + is_t5( + qhp->rhp->rdev.lldi.adapter_type) ? 
+ 1 : 0); break; case IB_WR_LOCAL_INV: if (wr->send_flags & IB_SEND_FENCE) @@@ -1484,9 -1451,6 +1488,9 @@@ int c4iw_destroy_qp(struct ib_qp *ib_qp rhp->db_state = NORMAL; idr_for_each(&rhp->qpidr, enable_qp_db, NULL); } + if (db_coalescing_threshold >= 0) + if (rhp->qpcnt <= db_coalescing_threshold) + cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]); spin_unlock_irq(&rhp->lock); atomic_dec(&qhp->refcnt); wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); @@@ -1598,15 -1562,11 +1602,15 @@@ struct ib_qp *c4iw_create_qp(struct ib_ spin_lock_irq(&rhp->lock); if (rhp->db_state != NORMAL) t4_disable_wq_db(&qhp->wq); - if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { + rhp->qpcnt++; + if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { rhp->rdev.stats.db_state_transitions++; rhp->db_state = FLOW_CONTROL; idr_for_each(&rhp->qpidr, disable_qp_db, NULL); } + if (db_coalescing_threshold >= 0) + if (rhp->qpcnt > db_coalescing_threshold) + cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]); ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); spin_unlock_irq(&rhp->lock); if (ret) diff --combined drivers/infiniband/hw/mlx4/cq.c index 73b3a7132587,dab4b5188a27..d5e60f44ba5a --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@@ -33,6 -33,7 +33,7 @@@ #include #include + #include #include #include "mlx4_ib.h" @@@ -228,7 -229,7 +229,7 @@@ struct ib_cq *mlx4_ib_create_cq(struct vector = dev->eq_table[vector % ibdev->num_comp_vectors]; err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, vector, 0); + cq->db.dma, &cq->mcq, vector, 0, 0); if (err) goto err_dbmap; @@@ -585,6 -586,7 +586,7 @@@ static int mlx4_ib_poll_one(struct mlx4 struct mlx4_qp *mqp; struct mlx4_ib_wq *wq; struct mlx4_ib_srq *srq; + struct mlx4_srq *msrq = NULL; int is_send; int is_error; u32 g_mlpath_rqpn; @@@ -653,6 -655,20 +655,20 @@@ repoll wc->qp = &(*cur_qp)->ibqp; + if (wc->qp->qp_type == IB_QPT_XRC_TGT) { + u32 srq_num; + g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); + srq_num = g_mlpath_rqpn & 0xffffff; + /* SRQ is also in the radix tree */ + msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, + srq_num); + if (unlikely(!msrq)) { + pr_warn("CQ %06x with entry for unknown SRQN %06x\n", + cq->mcq.cqn, srq_num); + return -EINVAL; + } + } + if (is_send) { wq = &(*cur_qp)->sq; if (!(*cur_qp)->sq_signal_bits) { @@@ -666,6 -682,11 +682,11 @@@ wqe_ctr = be16_to_cpu(cqe->wqe_index); wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); + } else if (msrq) { + srq = to_mibsrq(msrq); + wqe_ctr = be16_to_cpu(cqe->wqe_index); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else { wq = &(*cur_qp)->rq; tail = wq->tail & (wq->wqe_cnt - 1); diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c index 554b9063da54,31dd2a7a880f..b6e049a3c7a8 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@@ -730,8 -730,7 +730,8 @@@ static int ipoib_start_xmit(struct sk_b if ((header->proto != htons(ETH_P_IP)) && (header->proto != htons(ETH_P_IPV6)) && (header->proto != htons(ETH_P_ARP)) && - (header->proto != htons(ETH_P_RARP))) { + (header->proto != htons(ETH_P_RARP)) && + (header->proto != htons(ETH_P_TIPC))) { /* ethertype not supported by IPoIB */ ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); @@@ -752,7 -751,6 +752,7 @@@ switch (header->proto) { case htons(ETH_P_IP): case htons(ETH_P_IPV6): + case htons(ETH_P_TIPC): neigh = ipoib_neigh_get(dev, cb->hwaddr); if 
(unlikely(!neigh)) { neigh_add_path(skb, cb->hwaddr, dev); @@@ -830,7 -828,7 +830,7 @@@ static int ipoib_hard_header(struct sk_ */ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); - return 0; + return sizeof *header; } static void ipoib_set_mcast_list(struct net_device *dev) diff --combined drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bcf4d118e98c,20476844fb20..c9e6b62dd000 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@@ -889,7 -889,7 +889,7 @@@ static int mlx4_en_flow_replace(struct .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, }; rule.port = priv->port; @@@ -1147,35 -1147,6 +1147,35 @@@ out return err; } +static int mlx4_en_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + info->so_timestamping |= + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + } + + return ret; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@@ -1202,7 -1173,6 +1202,7 @@@ .set_rxfh_indir = mlx4_en_set_rxfh_indir, .get_channels = mlx4_en_get_channels, .set_channels = mlx4_en_set_channels, + .get_ts_info = mlx4_en_get_ts_info, }; diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c index a69a908614e6,0860130f2b17..b35f94700093 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@@ -127,7 -127,7 +127,7 @@@ static void mlx4_en_filter_work(struct .queue_mode = MLX4_NET_TRANS_Q_LIFO, .exclusive = 1, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .port = priv->port, .priority = MLX4_DOMAIN_RFS, }; @@@ -356,8 -356,7 +356,8 @@@ static void mlx4_en_filter_rfs_expire(s } #endif -static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@@ -382,8 -381,7 +382,8 @@@ return 0; } -static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@@ -448,7 -446,7 +448,7 @@@ static int mlx4_en_uc_steer_add(struct .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@@ -795,7 -793,7 +795,7 @@@ static void mlx4_en_set_promisc_mode(st err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed enabling promiscuous mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; @@@ -858,7 -856,7 +858,7 @@@ static void mlx4_en_clear_promisc_mode( case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - 
MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed disabling promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; @@@ -919,7 -917,7 +919,7 @@@ static void mlx4_en_do_multicast(struc err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@@ -942,7 -940,7 +942,7 @@@ case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@@ -1361,27 -1359,6 +1361,27 @@@ static void mlx4_en_do_get_stats(struc mutex_unlock(&mdev->state_lock); } +/* mlx4_en_service_task - Run service task for tasks that needed to be done + * periodically + */ +static void mlx4_en_service_task(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, + service_task); + struct mlx4_en_dev *mdev = priv->mdev; + + mutex_lock(&mdev->state_lock); + if (mdev->device_up) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_ptp_overflow_check(mdev); + + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + } + mutex_unlock(&mdev->state_lock); +} + static void mlx4_en_linkstate(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, @@@ -1621,10 -1598,10 +1621,10 @@@ void mlx4_en_stop_port(struct net_devic MLX4_EN_FLAG_MC_PROMISC); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { priv->flags &= ~MLX4_EN_FLAG_PROMISC; @@@ -1886,7 -1863,6 +1886,7 @@@ void mlx4_en_destroy_netdev(struct net_ mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); cancel_delayed_work(&priv->stats_task); + cancel_delayed_work(&priv->service_task); /* flush any pending task for this netdev */ flush_workqueue(mdev->workqueue); @@@ -1938,75 -1914,6 +1938,75 @@@ static int mlx4_en_change_mtu(struct ne return 0; } +static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* device doesn't support time stamping */ + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + return -EINVAL; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + 
default: + return -ERANGE; + } + + if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { + config.tx_type = HWTSTAMP_TX_OFF; + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx4_en_hwtstamp_ioctl(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { @@@ -2024,40 -1931,77 +2024,40 @@@ } -static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 flags) +static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) { - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_en_mac_to_u64(mac); - /* Hardware does not support aging addresses, allow only - * permanent addresses if ndm_state is given - */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Add FDB only supports static addresses\n"); + if (!is_valid_ether_addr(mac)) return -EINVAL; - } - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_add_excl(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_add_excl(dev, addr); - else - err = -EINVAL; - - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); } -static int mlx4_en_fdb_del(struct ndmsg *ndm, - struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr) +static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) { - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Del FDB only supports static addresses\n"); - return -EINVAL; - } + return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); +} - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; +static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; - return err; + return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); } -static int mlx4_en_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, int idx) +static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) { - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - - if (mlx4_is_mfunc(mdev)) - idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; - return idx; + return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); } static const struct net_device_ops mlx4_netdev_ops = { @@@ 
-2070,7 -2014,6 +2070,7 @@@ .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_do_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, @@@ -2082,33 -2025,9 +2082,33 @@@ #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif - .ndo_fdb_add = mlx4_en_fdb_add, - .ndo_fdb_del = mlx4_en_fdb_del, - .ndo_fdb_dump = mlx4_en_fdb_dump, +}; + +static const struct net_device_ops mlx4_netdev_ops_master = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, + .ndo_set_vf_mac = mlx4_en_set_vf_mac, + .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, + .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_get_vf_config = mlx4_en_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@@ -2169,16 -2088,9 +2169,16 @@@ INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); #ifdef CONFIG_MLX4_EN_DCB - if (!mlx4_is_slave(priv->mdev->dev)) - dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + if (!mlx4_is_slave(priv->mdev->dev)) { + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + } else { + en_info(priv, "enabling only PFC DCB ops\n"); + dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; + } + } #endif for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) @@@ -2210,11 -2122,6 +2210,11 @@@ spin_lock_init(&priv->filters_lock); #endif + /* Initialize time stamping config */ + priv->hwtstamp_config.flags = 0; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); @@@ -2227,10 -2134,7 +2227,10 @@@ /* * Initialize netdev entry points */ - dev->netdev_ops = &mlx4_netdev_ops; + if (mlx4_is_master(priv->mdev->dev)) + dev->netdev_ops = &mlx4_netdev_ops_master; + else + dev->netdev_ops = &mlx4_netdev_ops; dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; netif_set_real_num_tx_queues(dev, priv->tx_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); @@@ -2248,8 -2152,8 +2248,8 @@@ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; dev->features = dev->hw_features | NETIF_F_HIGHDMA | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; dev->hw_features |= NETIF_F_LOOPBACK; if (mdev->dev->caps.steering_mode == @@@ -2295,11 -2199,6 +2295,11 @@@ } mlx4_en_set_default_moderation(priv); queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + + if (mdev->dev->caps.flags2 
& MLX4_DEV_CAP_FLAG2_TS) + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + return 0; out: diff --combined drivers/net/ethernet/mellanox/mlx4/mcg.c index ffc78d2cb0cf,00b4e7be7c7e..f3e804f2a35f --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@@ -645,25 -645,37 +645,37 @@@ static int find_entry(struct mlx4_dev * return err; } + static const u8 __promisc_mode[] = { + [MLX4_FS_REGULAR] = 0x0, + [MLX4_FS_ALL_DEFAULT] = 0x1, + [MLX4_FS_MC_DEFAULT] = 0x3, + [MLX4_FS_UC_SNIFFER] = 0x4, + [MLX4_FS_MC_SNIFFER] = 0x5, + }; + + int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type) + { + if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { + mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); + return -EINVAL; + } + return __promisc_mode[flow_type]; + } + EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); + static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { - static const u8 __promisc_mode[] = { - [MLX4_FS_PROMISC_NONE] = 0x0, - [MLX4_FS_PROMISC_UPLINK] = 0x1, - [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, - [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, - }; - - u32 dw = 0; - - dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; - dw |= ctrl->exclusive ? (1 << 2) : 0; - dw |= ctrl->allow_loopback ? (1 << 3) : 0; - dw |= __promisc_mode[ctrl->promisc_mode] << 8; - dw |= ctrl->priority << 16; - - hw->ctrl = cpu_to_be32(dw); + u8 flags = 0; + + flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + flags |= ctrl->exclusive ? (1 << 2) : 0; + flags |= ctrl->allow_loopback ? (1 << 3) : 0; + + hw->flags = flags; + hw->type = __promisc_mode[ctrl->promisc_mode]; + hw->prio = cpu_to_be16(ctrl->priority); hw->port = ctrl->port; hw->qpn = cpu_to_be32(ctrl->qpn); } @@@ -677,29 -689,51 +689,51 @@@ const u16 __sw_id_hw[] = [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; + int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) + { + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + return __sw_id_hw[id]; + } + EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); + + static const int __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) + }; + + int mlx4_hw_rule_sz(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) + { + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. 
id = %d\n", id); + return -EINVAL; + } + + return __rule_hw_sz[id]; + } + EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); + static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { - static const size_t __rule_hw_sz[] = { - [MLX4_NET_TRANS_RULE_ID_ETH] = - sizeof(struct mlx4_net_trans_rule_hw_eth), - [MLX4_NET_TRANS_RULE_ID_IB] = - sizeof(struct mlx4_net_trans_rule_hw_ib), - [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, - [MLX4_NET_TRANS_RULE_ID_IPV4] = - sizeof(struct mlx4_net_trans_rule_hw_ipv4), - [MLX4_NET_TRANS_RULE_ID_TCP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), - [MLX4_NET_TRANS_RULE_ID_UDP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) - }; - if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { - mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id); + if (mlx4_hw_rule_sz(dev, spec->id) < 0) return -EINVAL; - } - memset(rule_hw, 0, __rule_hw_sz[spec->id]); + memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); - rule_hw->size = __rule_hw_sz[spec->id] >> 2; + rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; switch (spec->id) { case MLX4_NET_TRANS_RULE_ID_ETH: @@@ -713,12 -747,12 +747,12 @@@ rule_hw->eth.ether_type_enable = 1; rule_hw->eth.ether_type = spec->eth.ether_type; } - rule_hw->eth.vlan_id = spec->eth.vlan_id; - rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; + rule_hw->eth.vlan_tag = spec->eth.vlan_id; + rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; break; case MLX4_NET_TRANS_RULE_ID_IB: - rule_hw->ib.qpn = spec->ib.r_qpn; + rule_hw->ib.l3_qpn = spec->ib.l3_qpn; rule_hw->ib.qpn_mask = spec->ib.qpn_msk; memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); @@@ -1125,18 -1159,35 +1159,18 @@@ static int mlx4_QP_ATTACH(struct mlx4_d return err; } -int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - u8 port, int block_mcast_loopback, - enum mlx4_protocol prot, u64 *reg_id) +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) { - - switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_A0: - if (prot == MLX4_PROT_ETH) - return 0; - - case MLX4_STEERING_MODE_B0: - if (prot == MLX4_PROT_ETH) - gid[7] |= (MLX4_MC_STEER << 1); - - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 1, - block_mcast_loopback, prot); - return mlx4_qp_attach_common(dev, qp, gid, - block_mcast_loopback, prot, - MLX4_MC_STEER); - - case MLX4_STEERING_MODE_DEVICE_MANAGED: { struct mlx4_spec_list spec = { {NULL} }; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); struct mlx4_net_trans_rule rule = { .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@@ -1163,32 -1214,8 +1197,32 @@@ list_add_tail(&spec.list, &rule.list); return mlx4_flow_attach(dev, &rule, reg_id); - } +} + +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + u8 port, int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + return mlx4_qp_attach_common(dev, qp, gid, + 
block_mcast_loopback, prot, + MLX4_MC_STEER); + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, + block_mcast_loopback, + prot, reg_id); default: return -EINVAL; } @@@ -1229,11 -1256,10 +1263,10 @@@ int mlx4_flow_steer_promisc_add(struct u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: @@@ -1260,11 -1286,10 +1293,10 @@@ int mlx4_flow_steer_promisc_remove(stru u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index eac3dae10efe,d5fdb19771e2..df15bb6631cc --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -87,8 -87,7 +87,8 @@@ enum MLX4_HCR_SIZE = 0x0001c, MLX4_CLR_INT_SIZE = 0x00008, MLX4_SLAVE_COMM_BASE = 0x0, - MLX4_COMM_PAGESIZE = 0x1000 + MLX4_COMM_PAGESIZE = 0x1000, + MLX4_CLOCK_SIZE = 0x00008 }; enum { @@@ -404,7 -403,6 +404,7 @@@ struct mlx4_fw u64 clr_int_base; u64 catas_offset; u64 comm_base; + u64 clock_offset; struct mlx4_icm *fw_icm; struct mlx4_icm *aux_icm; u32 catas_size; @@@ -412,7 -410,6 +412,7 @@@ u8 clr_int_bar; u8 catas_bar; u8 comm_bar; + u8 clock_bar; }; struct mlx4_comm { @@@ -473,30 -470,6 +473,30 @@@ struct mlx4_slave_state enum slave_port_state port_state[MLX4_MAX_PORTS + 1]; }; +#define MLX4_VGT 4095 +#define NO_INDX (-1) + +struct mlx4_vport_state { + u64 mac; + u16 default_vlan; + u8 default_qos; + u32 tx_rate; + bool spoofchk; +}; + +struct mlx4_vf_admin_state { + struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_vport_oper_state { + struct mlx4_vport_state state; + int mac_idx; + int vlan_idx; +}; +struct mlx4_vf_oper_state { + struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; +}; + struct slave_list { struct mutex mutex; struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; @@@ -527,8 -500,6 +527,8 @@@ struct mlx4_master_qp0_state struct mlx4_mfunc_master_ctx { struct mlx4_slave_state *slave_state; + struct mlx4_vf_admin_state *vf_admin; + struct mlx4_vf_oper_state *vf_oper; struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; @@@ -730,85 -701,6 +730,6 @@@ struct mlx4_steer struct list_head steer_entries[MLX4_NUM_STEERS]; }; - struct mlx4_net_trans_rule_hw_ctrl { - __be32 ctrl; - u8 rsvd1; - u8 funcid; - u8 vep; - u8 port; - __be32 qpn; - __be32 rsvd2; - }; - - struct mlx4_net_trans_rule_hw_ib { - u8 size; - u8 rsvd1; - __be16 id; - u32 rsvd2; - __be32 qpn; - __be32 qpn_mask; - u8 dst_gid[16]; - u8 dst_gid_msk[16]; - } __packed; - - struct mlx4_net_trans_rule_hw_eth { - u8 size; - u8 rsvd; - __be16 id; - u8 rsvd1[6]; - u8 dst_mac[6]; - u16 rsvd2; - u8 dst_mac_msk[6]; - u16 rsvd3; - u8 src_mac[6]; - u16 rsvd4; - u8 src_mac_msk[6]; - u8 rsvd5; - u8 ether_type_enable; - __be16 ether_type; - __be16 vlan_id_msk; - __be16 vlan_id; - } __packed; - - struct mlx4_net_trans_rule_hw_tcp_udp { - u8 size; - u8 rsvd; - __be16 id; - __be16 rsvd1[3]; - __be16 dst_port; - __be16 rsvd2; - __be16 dst_port_msk; - __be16 rsvd3; - __be16 
src_port; - __be16 rsvd4; - __be16 src_port_msk; - } __packed; - - struct mlx4_net_trans_rule_hw_ipv4 { - u8 size; - u8 rsvd; - __be16 id; - __be32 rsvd1; - __be32 dst_ip; - __be32 dst_ip_msk; - __be32 src_ip; - __be32 src_ip_msk; - } __packed; - - struct _rule_hw { - union { - struct { - u8 size; - u8 rsvd; - __be16 id; - }; - struct mlx4_net_trans_rule_hw_eth eth; - struct mlx4_net_trans_rule_hw_ib ib; - struct mlx4_net_trans_rule_hw_ipv4 ipv4; - struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; - }; - }; - enum { MLX4_PCI_DEV_IS_VF = 1 << 0, MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, @@@ -855,7 -747,6 +776,7 @@@ struct mlx4_priv struct list_head bf_list; struct mutex bf_mutex; struct io_mapping *bf_mapping; + void __iomem *clock_mapping; int reserved_mtts; int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; @@@ -1157,8 -1048,6 +1078,8 @@@ int mlx4_change_port_types(struct mlx4_ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); /* resource tracker functions*/ @@@ -1222,10 -1111,6 +1143,10 @@@ int mlx4_qp_detach_common(struct mlx4_d int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer); +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id); int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --combined include/linux/mlx4/device.h index 53acaf64189f,ad4a53fbdddf..a51b0134ce18 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@@ -40,8 -40,6 +40,8 @@@ #include +#include + #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 #define MSIX_LEGACY_SZ 4 @@@ -142,7 -140,6 +142,7 @@@ enum MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61, @@@ -154,10 -151,7 +154,10 @@@ enum MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, - MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4 + MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, + MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, + MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7 }; enum { @@@ -449,7 -443,6 +449,7 @@@ struct mlx4_caps u8 eqe_factor; u32 userspace_caps; /* userspace must be aware of these */ u32 function_caps; /* VFs must be aware of these */ + u16 hca_core_clock; }; struct mlx4_buf_list { @@@ -844,7 -837,7 +844,7 @@@ void mlx4_free_hwq_res(struct mlx4_dev int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned vector, int collapsed); + unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); @@@ -903,11 -896,12 +903,12 @@@ static inline int 
map_hw_to_sw_id(u16 h } enum mlx4_net_trans_promisc_mode { - MLX4_FS_PROMISC_NONE = 0, - MLX4_FS_PROMISC_UPLINK, - /* For future use. Not implemented yet */ - MLX4_FS_PROMISC_FUNCTION_PORT, - MLX4_FS_PROMISC_ALL_MULTI, + MLX4_FS_REGULAR = 1, + MLX4_FS_ALL_DEFAULT, + MLX4_FS_MC_DEFAULT, + MLX4_FS_UC_SNIFFER, + MLX4_FS_MC_SNIFFER, + MLX4_FS_MODE_NUM, /* should be last */ }; struct mlx4_spec_eth { @@@ -936,7 -930,7 +937,7 @@@ struct mlx4_spec_ipv4 }; struct mlx4_spec_ib { - __be32 r_qpn; + __be32 l3_qpn; __be32 qpn_msk; u8 dst_gid[16]; u8 dst_gid_msk[16]; @@@ -969,6 -963,92 +970,92 @@@ struct mlx4_net_trans_rule u32 qpn; }; + struct mlx4_net_trans_rule_hw_ctrl { + __be16 prio; + u8 type; + u8 flags; + u8 rsvd1; + u8 funcid; + u8 vep; + u8 port; + __be32 qpn; + __be32 rsvd2; + }; + + struct mlx4_net_trans_rule_hw_ib { + u8 size; + u8 rsvd1; + __be16 id; + u32 rsvd2; + __be32 l3_qpn; + __be32 qpn_mask; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; + } __packed; + + struct mlx4_net_trans_rule_hw_eth { + u8 size; + u8 rsvd; + __be16 id; + u8 rsvd1[6]; + u8 dst_mac[6]; + u16 rsvd2; + u8 dst_mac_msk[6]; + u16 rsvd3; + u8 src_mac[6]; + u16 rsvd4; + u8 src_mac_msk[6]; + u8 rsvd5; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_tag_msk; + __be16 vlan_tag; + } __packed; + + struct mlx4_net_trans_rule_hw_tcp_udp { + u8 size; + u8 rsvd; + __be16 id; + __be16 rsvd1[3]; + __be16 dst_port; + __be16 rsvd2; + __be16 dst_port_msk; + __be16 rsvd3; + __be16 src_port; + __be16 rsvd4; + __be16 src_port_msk; + } __packed; + + struct mlx4_net_trans_rule_hw_ipv4 { + u8 size; + u8 rsvd; + __be16 id; + __be32 rsvd1; + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; + } __packed; + + struct _rule_hw { + union { + struct { + u8 size; + u8 rsvd; + __be16 id; + }; + struct mlx4_net_trans_rule_hw_eth eth; + struct mlx4_net_trans_rule_hw_ib ib; + struct mlx4_net_trans_rule_hw_ipv4 ipv4; + struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; + }; + }; + + /* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */ + struct mlx4_flow_handle { + u64 reg_id[2]; + }; + int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode); int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, @@@ -1018,6 -1098,11 +1105,11 @@@ void mlx4_counter_free(struct mlx4_dev int mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); + int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type); + int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id); + int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val); @@@ -1035,6 -1120,4 +1127,6 @@@ int set_and_calc_slave_port_state(struc void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid); __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave); +cycle_t mlx4_read_clock(struct mlx4_dev *dev); + #endif /* MLX4_DEVICE_H */
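
Some notes on the recurring patterns in these patches follow, each with a small
compilable sketch; all stub types, names, and values below are illustrative
stand-ins, not the in-tree definitions.

The cxgb4 change ("RDMA/cxgb4: Fix SQ allocation when on-chip SQ is disabled")
consolidates the two send-queue allocation paths into one helper: user QPs try
the on-chip SQ pool first and fall back to host memory, kernel QPs go straight
to host memory. A minimal userspace sketch of that fallback shape, with stubs
standing in for struct c4iw_rdev and struct t4_sq:

	#include <errno.h>

	struct rdev { int dummy; };
	struct sq   { int on_chip; };

	/* illustrative stub: pretend the on-chip pool is disabled/exhausted */
	static int alloc_oc_sq(struct rdev *rdev, struct sq *sq)
	{
		(void)rdev; (void)sq;
		return -ENOSYS;
	}

	static int alloc_host_sq(struct rdev *rdev, struct sq *sq)
	{
		(void)rdev;
		sq->on_chip = 0;
		return 0;
	}

	static int alloc_sq(struct rdev *rdev, struct sq *sq, int user)
	{
		int ret = -ENOSYS;	/* kernel QPs never try the on-chip pool */

		if (user)
			ret = alloc_oc_sq(rdev, sq);
		if (ret)		/* -ENOSYS or a genuine on-chip failure */
			ret = alloc_host_sq(rdev, sq);
		return ret;
	}

The point of the refactor is that every failure path now leaves ret set before
the goto free_hwaddr; the old user-QP branch could jump there without ever
assigning it.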
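Still in qp.c, the two new module parameters act as high-water marks on the
per-device QP count: crossing db_fc_threshold upward flips the device into
doorbell flow-control mode, and crossing db_coalescing_threshold turns doorbell
coalescing off; c4iw_destroy_qp() re-enables coalescing when the count drops
back. A compact sketch of that threshold pattern, with the locking and the
cxgb4 device calls reduced to booleans (the decrement on the destroy side is
assumed, since it is outside the hunks shown):

	/* hypothetical per-device state; the driver guards this with rhp->lock */
	struct dev_state {
		int qpcnt;
		int flow_control;	/* rhp->db_state == FLOW_CONTROL */
		int coalescing;		/* doorbell coalescing enabled   */
	};

	static void qp_created(struct dev_state *d, int db_fc_thr, int db_coal_thr)
	{
		d->qpcnt++;
		if (d->qpcnt > db_fc_thr && !d->flow_control)
			d->flow_control = 1;	/* disable user WQ doorbells */
		if (db_coal_thr >= 0 && d->qpcnt > db_coal_thr)
			d->coalescing = 0;	/* cxgb4_disable_db_coalescing() */
	}

	static void qp_destroyed(struct dev_state *d, int db_coal_thr)
	{
		d->qpcnt--;
		if (db_coal_thr >= 0 && d->qpcnt <= db_coal_thr)
			d->coalescing = 1;	/* cxgb4_enable_db_coalescing() */
	}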
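The build_fastreg() rework posts large page lists by reference instead of
copying them into the work request: on T5 devices with use_dsgl set, a page
list longer than the max_fr_immd module parameter is described by a DSGL
segment carrying its DMA address, while shorter lists are still inlined as
immediate data. The trade-off in work-request space shows up directly in the
len16 computation; a sketch, assuming for simplicity a single seg_sz where the
driver distinguishes sizeof(*sglp) from sizeof(*imdp):

	#include <stddef.h>

	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

	/* length of the WR in 16-byte units, as the T4/T5 firmware expects */
	static size_t fastreg_len16(int t5dev, int use_dsgl, size_t max_fr_immd,
				    size_t fr_sz, size_t seg_sz, size_t pbllen)
	{
		if (t5dev && use_dsgl && pbllen > max_fr_immd)
			/* DSGL: fixed-size pointer segment; the page list
			 * stays in DMA memory */
			return DIV_ROUND_UP(fr_sz + seg_sz, 16);

		/* immediate: the whole page list rides inside the WR */
		return DIV_ROUND_UP(fr_sz + seg_sz + pbllen, 16);
	}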
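The mlx4_ib_poll_one() hunk adds XRC target QP support to completion
processing: for IB_QPT_XRC_TGT the CQE does not identify the receive queue
directly, so the SRQ number is recovered from the low 24 bits of the CQE's
g_mlpath_rqpn word and resolved through the driver's radix tree via
mlx4_srq_lookup(). A pared-down sketch of just the bit extraction, assuming
host byte order (the driver applies be32_to_cpu() first):

	#include <stdint.h>

	struct cqe {
		uint32_t g_mlpath_rqpn;		/* __be32 in the driver */
	};

	/* bits [23:0] carry the SRQ number for XRC target completions */
	static uint32_t cqe_srq_num(const struct cqe *cqe)
	{
		return cqe->g_mlpath_rqpn & 0xffffff;
	}

If the lookup fails, the driver warns and returns -EINVAL rather than touching
a stale wr_id — that is the new failure mode this hunk introduces.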
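The one-line IPoIB change ("Fix ipoib_hard_header() return value") is about
the header_ops->create contract: the callback must return the number of bytes
it pushed (negative on error), not 0, because callers such as AF_PACKET use
the return value as the header length. A userspace-shaped sketch of the
convention, with a simplified 4-byte header:

	#include <string.h>

	struct ipoib_header {
		unsigned short proto;
		unsigned short reserved;
	};

	/* returns bytes prepended -- "return 0" here was the bug being fixed */
	static int push_header(unsigned char *buf, unsigned short proto)
	{
		struct ipoib_header header = { .proto = proto, .reserved = 0 };

		memcpy(buf, &header, sizeof(header));
		return sizeof(header);
	}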
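On the mlx4 side, the renamed promiscuous modes (MLX4_FS_REGULAR,
MLX4_FS_ALL_DEFAULT, ...) map to firmware encodings through a
designated-initializer table guarded by a range check — the shape
mlx4_map_sw_to_hw_steering_mode() now exports from mcg.c. A self-contained
sketch; the HW byte values match the hunk, and -1 stands in for -EINVAL:

	enum fs_mode {
		FS_REGULAR = 1,		/* mirrors MLX4_FS_REGULAR; 0 stays unused */
		FS_ALL_DEFAULT,
		FS_MC_DEFAULT,
		FS_UC_SNIFFER,
		FS_MC_SNIFFER,
		FS_MODE_NUM,		/* keep last: doubles as the range bound */
	};

	static const unsigned char fs_mode_hw[] = {
		[FS_REGULAR]     = 0x0,
		[FS_ALL_DEFAULT] = 0x1,
		[FS_MC_DEFAULT]  = 0x3,
		[FS_UC_SNIFFER]  = 0x4,
		[FS_MC_SNIFFER]  = 0x5,
	};

	static int map_sw_to_hw_mode(int mode)
	{
		if (mode < FS_REGULAR || mode >= FS_MODE_NUM)
			return -1;	/* driver: mlx4_err() + -EINVAL */
		return fs_mode_hw[mode];
	}

Keeping the *_NUM enumerator last lets the bounds check survive future
additions to the mode list without being touched.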
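Finally, the control-segment rework ("Directly expose fields of DMFS HW rule
control segment") replaces a hand-shifted __be32 with named fields matching
the firmware layout, so trans_rule_ctrl_to_hw() now only assembles the low
flag bits. A sketch of the packing with stand-in types; bit positions are
taken from the hunk, and the driver additionally byte-swaps prio with
cpu_to_be16():

	#include <stdint.h>

	struct rule_hw_ctrl {		/* stand-in for mlx4_net_trans_rule_hw_ctrl */
		uint16_t prio;
		uint8_t  type;		/* promiscuous-mode encoding, see table above */
		uint8_t  flags;
	};

	static void pack_ctrl(struct rule_hw_ctrl *hw, int lifo_queue,
			      int exclusive, int allow_loopback,
			      uint8_t hw_mode, uint16_t prio)
	{
		uint8_t flags = 0;

		flags |= lifo_queue ? 1 : 0;		/* bit 0: LIFO vs FIFO queue */
		flags |= exclusive ? (1 << 2) : 0;	/* bit 2: exclusive rule     */
		flags |= allow_loopback ? (1 << 3) : 0;	/* bit 3: allow loopback     */

		hw->flags = flags;
		hw->type  = hw_mode;
		hw->prio  = prio;
	}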