diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 1bf327271cee34bab6720c70df028b71b5a2513e..f37c05fed5bcf3c601ef64c912c9f58051fd7686 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -18,6 +18,7 @@
 #include <linux/inetdevice.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
+#include <linux/mfd/syscon.h>
 #include <linux/interrupt.h>
 #include <linux/cpumask.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/phy.h>
+#include <linux/phy/phy.h>
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
+#include <linux/regmap.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/tso.h>
 
 /* RX Fifo Registers */
 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port)      (0x00 + 4 * (port))
 #define MVPP2_TXQ_DESC_ADDR_REG                        0x2084
 #define MVPP2_TXQ_DESC_SIZE_REG                        0x2088
 #define     MVPP2_TXQ_DESC_SIZE_MASK           0x3ff0
+#define MVPP2_TXQ_THRESH_REG                   0x2094
+#define            MVPP2_TXQ_THRESH_OFFSET             16
+#define            MVPP2_TXQ_THRESH_MASK               0x3fff
 #define MVPP2_AGGR_TXQ_UPDATE_REG              0x2090
 #define MVPP2_TXQ_INDEX_REG                    0x2098
 #define MVPP2_TXQ_PREF_BUF_REG                 0x209c
 #define MVPP22_AXI_CODE_DOMAIN_SYSTEM          3
 
 /* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_TX_THRESHOLD_REG(port)       (0x5140 + 4 * (port))
+#define     MVPP2_MAX_ISR_TX_THRESHOLD         0xfffff0
+
 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq)                (0x5200 + 4 * (rxq))
 #define     MVPP2_MAX_ISR_RX_THRESHOLD         0xfffff0
 #define MVPP21_ISR_RXQ_GROUP_REG(port)         (0x5400 + 4 * (port))
 
-#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG         0x5400
 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
-#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
-#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK  0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET        7
 
 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
-#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK  0x380
 
-#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
-#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
-#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
-#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG    0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK   0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK     0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET   8
 
 #define MVPP2_ISR_ENABLE_REG(port)             (0x5420 + 4 * (port))
 #define     MVPP2_ISR_ENABLE_INTERRUPT(mask)   ((mask) & 0xffff)
 #define MVPP2_ISR_RX_TX_CAUSE_REG(port)                (0x5480 + 4 * (port))
 #define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK        0xffff
 #define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK        0xff0000
+#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET      16
 #define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK   BIT(24)
 #define     MVPP2_CAUSE_FCS_ERR_MASK           BIT(25)
 #define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK  BIT(26)
 #define MVPP2_BM_VIRT_RLS_REG                  0x64c0
 #define MVPP22_BM_ADDR_HIGH_RLS_REG            0x64c4
 #define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK  0xff
-#define            MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK   0xff00
+#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK  0xff00
 #define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
 
 /* TX Scheduler registers */
 
 /* Per-port registers */
 #define MVPP2_GMAC_CTRL_0_REG                  0x0
-#define      MVPP2_GMAC_PORT_EN_MASK           BIT(0)
-#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS       2
-#define      MVPP2_GMAC_MAX_RX_SIZE_MASK       0x7ffc
-#define      MVPP2_GMAC_MIB_CNTR_EN_MASK       BIT(15)
+#define     MVPP2_GMAC_PORT_EN_MASK            BIT(0)
+#define     MVPP2_GMAC_PORT_TYPE_MASK          BIT(1)
+#define     MVPP2_GMAC_MAX_RX_SIZE_OFFS                2
+#define     MVPP2_GMAC_MAX_RX_SIZE_MASK                0x7ffc
+#define     MVPP2_GMAC_MIB_CNTR_EN_MASK                BIT(15)
 #define MVPP2_GMAC_CTRL_1_REG                  0x4
-#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK   BIT(1)
-#define      MVPP2_GMAC_GMII_LB_EN_MASK                BIT(5)
-#define      MVPP2_GMAC_PCS_LB_EN_BIT          6
-#define      MVPP2_GMAC_PCS_LB_EN_MASK         BIT(6)
-#define      MVPP2_GMAC_SA_LOW_OFFS            7
+#define     MVPP2_GMAC_PERIODIC_XON_EN_MASK    BIT(1)
+#define     MVPP2_GMAC_GMII_LB_EN_MASK         BIT(5)
+#define     MVPP2_GMAC_PCS_LB_EN_BIT           6
+#define     MVPP2_GMAC_PCS_LB_EN_MASK          BIT(6)
+#define     MVPP2_GMAC_SA_LOW_OFFS             7
 #define MVPP2_GMAC_CTRL_2_REG                  0x8
-#define      MVPP2_GMAC_INBAND_AN_MASK         BIT(0)
-#define      MVPP2_GMAC_PCS_ENABLE_MASK                BIT(3)
-#define      MVPP2_GMAC_PORT_RGMII_MASK                BIT(4)
-#define      MVPP2_GMAC_PORT_RESET_MASK                BIT(6)
+#define     MVPP2_GMAC_INBAND_AN_MASK          BIT(0)
+#define     MVPP2_GMAC_FLOW_CTRL_MASK          GENMASK(2, 1)
+#define     MVPP2_GMAC_PCS_ENABLE_MASK         BIT(3)
+#define     MVPP2_GMAC_PORT_RGMII_MASK         BIT(4)
+#define     MVPP2_GMAC_DISABLE_PADDING         BIT(5)
+#define     MVPP2_GMAC_PORT_RESET_MASK         BIT(6)
 #define MVPP2_GMAC_AUTONEG_CONFIG              0xc
-#define      MVPP2_GMAC_FORCE_LINK_DOWN                BIT(0)
-#define      MVPP2_GMAC_FORCE_LINK_PASS                BIT(1)
-#define      MVPP2_GMAC_CONFIG_MII_SPEED       BIT(5)
-#define      MVPP2_GMAC_CONFIG_GMII_SPEED      BIT(6)
-#define      MVPP2_GMAC_AN_SPEED_EN            BIT(7)
-#define      MVPP2_GMAC_FC_ADV_EN              BIT(9)
-#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX     BIT(12)
-#define      MVPP2_GMAC_AN_DUPLEX_EN           BIT(13)
+#define     MVPP2_GMAC_FORCE_LINK_DOWN         BIT(0)
+#define     MVPP2_GMAC_FORCE_LINK_PASS         BIT(1)
+#define     MVPP2_GMAC_IN_BAND_AUTONEG         BIT(2)
+#define     MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS  BIT(3)
+#define     MVPP2_GMAC_CONFIG_MII_SPEED        BIT(5)
+#define     MVPP2_GMAC_CONFIG_GMII_SPEED       BIT(6)
+#define     MVPP2_GMAC_AN_SPEED_EN             BIT(7)
+#define     MVPP2_GMAC_FC_ADV_EN               BIT(9)
+#define     MVPP2_GMAC_FLOW_CTRL_AUTONEG       BIT(11)
+#define     MVPP2_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
+#define     MVPP2_GMAC_AN_DUPLEX_EN            BIT(13)
+#define MVPP2_GMAC_STATUS0                     0x10
+#define     MVPP2_GMAC_STATUS0_LINK_UP         BIT(0)
 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG         0x1c
-#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS    6
-#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK        0x1fc0
-#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+#define     MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS     6
+#define     MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define     MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)  (((v) << 6) & \
                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_INT_STAT                   0x20
+#define     MVPP22_GMAC_INT_STAT_LINK          BIT(1)
+#define MVPP22_GMAC_INT_MASK                   0x24
+#define     MVPP22_GMAC_INT_MASK_LINK_STAT     BIT(1)
 #define MVPP22_GMAC_CTRL_4_REG                 0x90
-#define      MVPP22_CTRL4_EXT_PIN_GMII_SEL     BIT(0)
-#define      MVPP22_CTRL4_DP_CLK_SEL           BIT(5)
-#define      MVPP22_CTRL4_SYNC_BYPASS          BIT(6)
-#define      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define     MVPP22_CTRL4_EXT_PIN_GMII_SEL      BIT(0)
+#define     MVPP22_CTRL4_DP_CLK_SEL            BIT(5)
+#define     MVPP22_CTRL4_SYNC_BYPASS_DIS       BIT(6)
+#define     MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE  BIT(7)
+#define MVPP22_GMAC_INT_SUM_MASK               0xa4
+#define     MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
 
 /* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
  * relative to port->base.
  */
 #define MVPP22_XLG_CTRL0_REG                   0x100
-#define      MVPP22_XLG_CTRL0_PORT_EN          BIT(0)
-#define      MVPP22_XLG_CTRL0_MAC_RESET_DIS    BIT(1)
-#define      MVPP22_XLG_CTRL0_MIB_CNT_DIS      BIT(14)
-
+#define     MVPP22_XLG_CTRL0_PORT_EN           BIT(0)
+#define     MVPP22_XLG_CTRL0_MAC_RESET_DIS     BIT(1)
+#define     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN   BIT(7)
+#define     MVPP22_XLG_CTRL0_MIB_CNT_DIS       BIT(14)
+#define MVPP22_XLG_CTRL1_REG                   0x104
+#define     MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS       0
+#define     MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK       0x1fff
+#define MVPP22_XLG_STATUS                      0x10c
+#define     MVPP22_XLG_STATUS_LINK_UP          BIT(0)
+#define MVPP22_XLG_INT_STAT                    0x114
+#define     MVPP22_XLG_INT_STAT_LINK           BIT(1)
+#define MVPP22_XLG_INT_MASK                    0x118
+#define     MVPP22_XLG_INT_MASK_LINK           BIT(1)
 #define MVPP22_XLG_CTRL3_REG                   0x11c
-#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK       (7 << 13)
-#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC       (0 << 13)
-#define      MVPP22_XLG_CTRL3_MACMODESELECT_10G                (1 << 13)
+#define     MVPP22_XLG_CTRL3_MACMODESELECT_MASK        (7 << 13)
+#define     MVPP22_XLG_CTRL3_MACMODESELECT_GMAC        (0 << 13)
+#define     MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_MASK                        0x15c
+#define     MVPP22_XLG_EXT_INT_MASK_XLG                BIT(1)
+#define     MVPP22_XLG_EXT_INT_MASK_GIG                BIT(2)
+#define MVPP22_XLG_CTRL4_REG                   0x184
+#define     MVPP22_XLG_CTRL4_FWD_FC            BIT(5)
+#define     MVPP22_XLG_CTRL4_FWD_PFC           BIT(6)
+#define     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
 
 /* SMI registers. PPv2.2 only, relative to priv->iface_base. */
 #define MVPP22_SMI_MISC_CFG_REG                        0x1204
-#define      MVPP22_SMI_POLLING_EN             BIT(10)
+#define     MVPP22_SMI_POLLING_EN              BIT(10)
 
 #define MVPP22_GMAC_BASE(port)         (0x7000 + (port) * 0x1000 + 0xe00)
 
 #define MVPP2_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
 
+/* MPCS registers. PPv2.2 only */
+#define MVPP22_MPCS_BASE(port)                 (0x7000 + (port) * 0x1000)
+#define MVPP22_MPCS_CTRL                       0x14
+#define     MVPP22_MPCS_CTRL_FWD_ERR_CONN      BIT(10)
+#define MVPP22_MPCS_CLK_RESET                  0x14c
+#define     MAC_CLK_RESET_SD_TX                        BIT(0)
+#define     MAC_CLK_RESET_SD_RX                        BIT(1)
+#define     MAC_CLK_RESET_MAC                  BIT(2)
+#define     MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
+#define     MVPP22_MPCS_CLK_RESET_DIV_SET      BIT(11)
+
+/* XPCS registers. PPv2.2 only */
+#define MVPP22_XPCS_BASE(port)                 (0x7400 + (port) * 0x1000)
+#define MVPP22_XPCS_CFG0                       0x0
+#define     MVPP22_XPCS_CFG0_PCS_MODE(n)       ((n) << 3)
+#define     MVPP22_XPCS_CFG0_ACTIVE_LANE(n)    ((n) << 5)
+
+/* System controller registers. Accessed through a regmap. */
+#define GENCONF_SOFT_RESET1                            0x1108
+#define     GENCONF_SOFT_RESET1_GOP                    BIT(6)
+#define GENCONF_PORT_CTRL0                             0x1110
+#define     GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT                BIT(1)
+#define     GENCONF_PORT_CTRL0_RX_DATA_SAMPLE          BIT(29)
+#define     GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR       BIT(31)
+#define GENCONF_PORT_CTRL1                             0x1114
+#define     GENCONF_PORT_CTRL1_EN(p)                   BIT(p)
+#define     GENCONF_PORT_CTRL1_RESET(p)                        (BIT(p) << 28)
+#define GENCONF_CTRL0                                  0x1120
+#define     GENCONF_CTRL0_PORT0_RGMII                  BIT(0)
+#define     GENCONF_CTRL0_PORT1_RGMII_MII              BIT(1)
+#define     GENCONF_CTRL0_PORT1_RGMII                  BIT(2)
+
 /* Various constants */
 
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH  15
 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC         1000
 #define MVPP2_RX_COAL_PKTS             32
 #define MVPP2_RX_COAL_USEC             100
 
@@ -709,6 +783,11 @@ struct mvpp2 {
         */
        void __iomem *swth_base[MVPP2_MAX_THREADS];
 
+       /* On PPv2.2, some port control registers are located in the system
+        * controller space. These registers are accessible through a regmap.
+        */
+       struct regmap *sysctrl_base;
+
        /* Common clocks */
        struct clk *pp_clk;
        struct clk *gop_clk;
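
[Editorial note, not part of the patch: the sysctrl_base member added above is the PPv2.2 system-controller window accessed through a regmap. A minimal sketch of how such a regmap is typically resolved at probe time via the syscon helpers, assuming a "marvell,system-controller" phandle in the controller's DT node; helper name and error handling are illustrative. A NULL pointer lets the PPv2.2 GoP setup code (mvpp22_gop_init() further down, which bails out on a NULL sysctrl_base) skip the system-controller writes on PPv2.1.]

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical probe-time helper: resolve the system-controller phandle
 * into the regmap stored in sysctrl_base; if the property is absent
 * (e.g. on PPv2.1), leave the pointer NULL so callers can skip it.
 */
static void example_lookup_sysctrl(struct platform_device *pdev,
                                   struct mvpp2 *priv)
{
        priv->sysctrl_base =
                syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                "marvell,system-controller");
        if (IS_ERR(priv->sysctrl_base))
                priv->sysctrl_base = NULL;
}
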
@@ -774,6 +853,8 @@ struct mvpp2_port {
         */
        int gop_id;
 
+       int link_irq;
+
        struct mvpp2 *priv;
 
        /* Per-port registers' base address */
@@ -799,6 +880,7 @@ struct mvpp2_port {
 
        phy_interface_t phy_interface;
        struct device_node *phy_node;
+       struct phy *comphy;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
@@ -811,6 +893,9 @@ struct mvpp2_port {
 
        struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
        unsigned int nqvecs;
+       bool has_tx_irqs;
+
+       u32 tx_time_coal;
 };
 
 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -946,6 +1031,10 @@ struct mvpp2_txq_pcpu {
 
        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;
+
+       /* DMA buffer for TSO headers */
+       char *tso_headers;
+       dma_addr_t tso_headers_dma;
 };
 
 struct mvpp2_tx_queue {
@@ -1076,6 +1165,15 @@ struct mvpp2_bm_pool {
        u32 port_map;
 };
 
+/* Queue modes */
+#define MVPP2_QDIST_SINGLE_MODE        0
+#define MVPP2_QDIST_MULTI_MODE 1
+
+static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
+module_param(queue_mode, int, 0444);
+MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
+
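
[Usage note, editorial: queue_mode is declared with 0444 permissions, so it is read-only through sysfs and can only be chosen at load time, e.g. "modprobe mvpp2 queue_mode=1" when the driver is built as a module, or mvpp2.queue_mode=1 on the kernel command line when it is built in.]
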
 #define MVPP2_DRIVER_NAME "mvpp2"
 #define MVPP2_DRIVER_VERSION "1.0"
 
@@ -4187,15 +4285,346 @@ static void mvpp2_interrupts_mask(void *arg)
 static void mvpp2_interrupts_unmask(void *arg)
 {
        struct mvpp2_port *port = arg;
+       u32 val;
+
+       val = MVPP2_CAUSE_MISC_SUM_MASK |
+               MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+       if (port->has_tx_irqs)
+               val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
 
        mvpp2_percpu_write(port->priv, smp_processor_id(),
-                          MVPP2_ISR_RX_TX_MASK_REG(port->id),
-                          (MVPP2_CAUSE_MISC_SUM_MASK |
-                           MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+                          MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+}
+
+static void
+mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
+{
+       u32 val;
+       int i;
+
+       if (port->priv->hw_version != MVPP22)
+               return;
+
+       if (mask)
+               val = 0;
+       else
+               val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+
+       for (i = 0; i < port->nqvecs; i++) {
+               struct mvpp2_queue_vector *v = port->qvecs + i;
+
+               if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
+                       continue;
+
+               mvpp2_percpu_write(port->priv, v->sw_thread_id,
+                                  MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+       }
 }
 
 /* Port configuration routines */
 
+static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
+{
+       struct mvpp2 *priv = port->priv;
+       u32 val;
+
+       regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+       val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
+       regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+       regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+       if (port->gop_id == 2)
+               val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+       else if (port->gop_id == 3)
+               val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+       regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+}
+
+static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
+{
+       struct mvpp2 *priv = port->priv;
+       u32 val;
+
+       regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+       val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
+              GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
+       regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+       if (port->gop_id > 1) {
+               regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+               if (port->gop_id == 2)
+                       val &= ~GENCONF_CTRL0_PORT0_RGMII;
+               else if (port->gop_id == 3)
+                       val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
+               regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+       }
+}
+
+static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
+{
+       struct mvpp2 *priv = port->priv;
+       void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+       void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+       u32 val;
+
+       /* XPCS */
+       val = readl(xpcs + MVPP22_XPCS_CFG0);
+       val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
+                MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
+       val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
+       writel(val, xpcs + MVPP22_XPCS_CFG0);
+
+       /* MPCS */
+       val = readl(mpcs + MVPP22_MPCS_CTRL);
+       val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
+       writel(val, mpcs + MVPP22_MPCS_CTRL);
+
+       val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+       val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
+                MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+       val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
+       writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+       val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+       val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
+       writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+}
+
+static int mvpp22_gop_init(struct mvpp2_port *port)
+{
+       struct mvpp2 *priv = port->priv;
+       u32 val;
+
+       if (!priv->sysctrl_base)
+               return 0;
+
+       switch (port->phy_interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               if (port->gop_id == 0)
+                       goto invalid_conf;
+               mvpp22_gop_init_rgmii(port);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               mvpp22_gop_init_sgmii(port);
+               break;
+       case PHY_INTERFACE_MODE_10GKR:
+               if (port->gop_id != 0)
+                       goto invalid_conf;
+               mvpp22_gop_init_10gkr(port);
+               break;
+       default:
+               goto unsupported_conf;
+       }
+
+       regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
+       val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
+              GENCONF_PORT_CTRL1_EN(port->gop_id);
+       regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
+
+       regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+       val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
+       regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+       regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
+       val |= GENCONF_SOFT_RESET1_GOP;
+       regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+
+unsupported_conf:
+       return 0;
+
+invalid_conf:
+       netdev_err(port->dev, "Invalid port configuration\n");
+       return -EINVAL;
+}
+
+static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
+{
+       u32 val;
+
+       if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+           port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               /* Enable the GMAC link status irq for this port */
+               val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+               val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+               writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+       }
+
+       if (port->gop_id == 0) {
+               /* Enable the XLG/GIG irqs for this port */
+               val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+               if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+                       val |= MVPP22_XLG_EXT_INT_MASK_XLG;
+               else
+                       val |= MVPP22_XLG_EXT_INT_MASK_GIG;
+               writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+       }
+}
+
+static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
+{
+       u32 val;
+
+       if (port->gop_id == 0) {
+               val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+               val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
+                        MVPP22_XLG_EXT_INT_MASK_GIG);
+               writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+       }
+
+       if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+           port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+               val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+               writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+       }
+}
+
+static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
+{
+       u32 val;
+
+       if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+           port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               val = readl(port->base + MVPP22_GMAC_INT_MASK);
+               val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
+               writel(val, port->base + MVPP22_GMAC_INT_MASK);
+       }
+
+       if (port->gop_id == 0) {
+               val = readl(port->base + MVPP22_XLG_INT_MASK);
+               val |= MVPP22_XLG_INT_MASK_LINK;
+               writel(val, port->base + MVPP22_XLG_INT_MASK);
+       }
+
+       mvpp22_gop_unmask_irq(port);
+}
+
+static int mvpp22_comphy_init(struct mvpp2_port *port)
+{
+       enum phy_mode mode;
+       int ret;
+
+       if (!port->comphy)
+               return 0;
+
+       switch (port->phy_interface) {
+       case PHY_INTERFACE_MODE_SGMII:
+               mode = PHY_MODE_SGMII;
+               break;
+       case PHY_INTERFACE_MODE_10GKR:
+               mode = PHY_MODE_10GKR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = phy_set_mode(port->comphy, mode);
+       if (ret)
+               return ret;
+
+       return phy_power_on(port->comphy);
+}
+
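
[Editorial sketch, not part of this hunk: mvpp22_comphy_init() above assumes port->comphy was already resolved at probe time. A hedged illustration (helper name and flow are assumptions) of how a generic PHY handle is typically picked up from the port's DT node, treating an absent comphy as non-fatal since the serdes may already have been configured by firmware.]

#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

static int example_get_comphy(struct device *dev,
                              struct device_node *port_node,
                              struct mvpp2_port *port)
{
        struct phy *comphy;

        comphy = devm_of_phy_get(dev, port_node, NULL);
        if (IS_ERR(comphy)) {
                if (PTR_ERR(comphy) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                /* No comphy described: not an error, just skip it later. */
                comphy = NULL;
        }

        port->comphy = comphy;
        return 0;
}
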
+static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
+{
+       u32 val;
+
+       if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+               val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
+                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+               val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+               writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+
+               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+               val |= MVPP2_GMAC_DISABLE_PADDING;
+               val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
+               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+       } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
+               val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+               val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+                      MVPP22_CTRL4_SYNC_BYPASS_DIS |
+                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+               val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+               writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+
+               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+               val &= ~MVPP2_GMAC_DISABLE_PADDING;
+               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+       }
+
+       /* The port is connected to a copper PHY */
+       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+
+       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+              MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+              MVPP2_GMAC_AN_DUPLEX_EN;
+       if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               val |= MVPP2_GMAC_IN_BAND_AUTONEG;
+       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
+{
+       u32 val;
+
+       /* Force link down */
+       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+       val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+       /* Set the GMAC in a reset state */
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+       val |= MVPP2_GMAC_PORT_RESET_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       /* Configure the PCS and in-band AN */
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+       if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
+       } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
+               val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+               val |= MVPP2_GMAC_PORT_RGMII_MASK;
+       }
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       mvpp2_port_mii_gmac_configure_mode(port);
+
+       /* Unset the GMAC reset state */
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+       val &= ~MVPP2_GMAC_PORT_RESET_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       /* Stop forcing link down */
+       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
+{
+       u32 val;
+
+       if (port->gop_id != 0)
+               return;
+
+       val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+       val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+       writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+
+       val = readl(port->base + MVPP22_XLG_CTRL4_REG);
+       val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
+       val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
+       writel(val, port->base + MVPP22_XLG_CTRL4_REG);
+}
+
 static void mvpp22_port_mii_set(struct mvpp2_port *port)
 {
        u32 val;
@@ -4213,38 +4642,18 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
 
                writel(val, port->base + MVPP22_XLG_CTRL3_REG);
        }
-
-       val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
-       if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
-               val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
-       else
-               val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
-       val &= ~MVPP22_CTRL4_DP_CLK_SEL;
-       val |= MVPP22_CTRL4_SYNC_BYPASS;
-       val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
-       writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
 }
 
 static void mvpp2_port_mii_set(struct mvpp2_port *port)
 {
-       u32 val;
-
        if (port->priv->hw_version == MVPP22)
                mvpp22_port_mii_set(port);
 
-       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-
-       switch (port->phy_interface) {
-       case PHY_INTERFACE_MODE_SGMII:
-               val |= MVPP2_GMAC_INBAND_AN_MASK;
-               break;
-       case PHY_INTERFACE_MODE_RGMII:
-               val |= MVPP2_GMAC_PORT_RGMII_MASK;
-       default:
-               val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
-       }
-
-       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+       if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+           port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               mvpp2_port_mii_gmac_configure(port);
+       else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+               mvpp2_port_mii_xlg_configure(port);
 }
 
 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
@@ -4351,6 +4760,18 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 }
 
+/* Change maximum receive size of the port */
+static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val =  readl(port->base + MVPP22_XLG_CTRL1_REG);
+       val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
+       val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+              MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
+       writel(val, port->base + MVPP22_XLG_CTRL1_REG);
+}
+
 /* Set defaults to the MVPP2 port */
 static void mvpp2_defaults_set(struct mvpp2_port *port)
 {
@@ -4812,6 +5233,23 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
        put_cpu();
 }
 
+/* For some reason in the LSP this is done on each CPU. Why? */
+static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
+                                  struct mvpp2_tx_queue *txq)
+{
+       int cpu = get_cpu();
+       u32 val;
+
+       if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
+               txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+
+       val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
+
+       put_cpu();
+}
+
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
 {
        u64 tmp = (u64)clk_hz * usec;
@@ -4848,6 +5286,22 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
 }
 
+static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
+{
+       unsigned long freq = port->priv->tclk;
+       u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+
+       if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
+               port->tx_time_coal =
+                       mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
+
+               /* re-evaluate to get actual register value */
+               val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+       }
+
+       mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
+}
+
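
[Worked example for the clamp above, editorial, assuming a 250 MHz tclk: mvpp2_usec_to_cycles() turns the default MVPP2_TXDONE_COAL_USEC of 1000 us into 250,000,000 * 1000 / 1,000,000 = 250,000 clock cycles, far below MVPP2_MAX_ISR_TX_THRESHOLD (0xfffff0 = 16,777,200), so no clamping occurs; a requested tx-usecs would only be capped and written back to port->tx_time_coal above roughly 67 ms (16,777,200 / 250 cycles per microsecond).]
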
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                                struct mvpp2_tx_queue *txq,
@@ -4906,7 +5360,8 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                        netif_tx_wake_queue(nq);
 }
 
-static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
+                                 int cpu)
 {
        struct mvpp2_tx_queue *txq;
        struct mvpp2_txq_pcpu *txq_pcpu;
@@ -4917,7 +5372,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
                if (!txq)
                        break;
 
-               txq_pcpu = this_cpu_ptr(txq->pcpu);
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 
                if (txq_pcpu->count) {
                        mvpp2_txq_done(port, txq, txq_pcpu);
@@ -4933,15 +5388,14 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
 
 /* Allocate and initialize descriptors for aggr TXQ */
 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
-                              struct mvpp2_tx_queue *aggr_txq,
-                              int desc_num, int cpu,
+                              struct mvpp2_tx_queue *aggr_txq, int cpu,
                               struct mvpp2 *priv)
 {
        u32 txq_dma;
 
        /* Allocate memory for TX descriptors */
        aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
-                               desc_num * MVPP2_DESC_ALIGNED_SIZE,
+                               MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
                                &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
                return -ENOMEM;
@@ -4962,7 +5416,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
                        MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
 
        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
-       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+                   MVPP2_AGGR_TXQ_SIZE);
 
        return 0;
 }
@@ -5143,6 +5598,14 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                txq_pcpu->reserved_num = 0;
                txq_pcpu->txq_put_index = 0;
                txq_pcpu->txq_get_index = 0;
+
+               txq_pcpu->tso_headers =
+                       dma_alloc_coherent(port->dev->dev.parent,
+                                          MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+                                          &txq_pcpu->tso_headers_dma,
+                                          GFP_KERNEL);
+               if (!txq_pcpu->tso_headers)
+                       goto cleanup;
        }
 
        return 0;
@@ -5150,6 +5613,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                kfree(txq_pcpu->buffs);
+
+               dma_free_coherent(port->dev->dev.parent,
+                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq_pcpu->tso_headers,
+                                 txq_pcpu->tso_headers_dma);
        }
 
        dma_free_coherent(port->dev->dev.parent,
@@ -5169,6 +5637,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                kfree(txq_pcpu->buffs);
+
+               dma_free_coherent(port->dev->dev.parent,
+                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq_pcpu->tso_headers,
+                                 txq_pcpu->tso_headers_dma);
        }
 
        if (txq->descs)
@@ -5305,6 +5778,14 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                        goto err_cleanup;
        }
 
+       if (port->has_tx_irqs) {
+               mvpp2_tx_time_coal_set(port);
+               for (queue = 0; queue < port->ntxqs; queue++) {
+                       txq = port->txqs[queue];
+                       mvpp2_tx_pkts_coal_set(port, txq);
+               }
+       }
+
        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
        return 0;
 
@@ -5325,63 +5806,161 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
+{
+       struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+       struct net_device *dev = port->dev;
+       bool event = false, link = false;
+       u32 val;
+
+       mvpp22_gop_mask_irq(port);
+
+       if (port->gop_id == 0 &&
+           port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
+               val = readl(port->base + MVPP22_XLG_INT_STAT);
+               if (val & MVPP22_XLG_INT_STAT_LINK) {
+                       event = true;
+                       val = readl(port->base + MVPP22_XLG_STATUS);
+                       if (val & MVPP22_XLG_STATUS_LINK_UP)
+                               link = true;
+               }
+       } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+                  port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+               val = readl(port->base + MVPP22_GMAC_INT_STAT);
+               if (val & MVPP22_GMAC_INT_STAT_LINK) {
+                       event = true;
+                       val = readl(port->base + MVPP2_GMAC_STATUS0);
+                       if (val & MVPP2_GMAC_STATUS0_LINK_UP)
+                               link = true;
+               }
+       }
+
+       if (!netif_running(dev) || !event)
+               goto handled;
+
+       if (link) {
+               mvpp2_interrupts_enable(port);
+
+               mvpp2_egress_enable(port);
+               mvpp2_ingress_enable(port);
+               netif_carrier_on(dev);
+               netif_tx_wake_all_queues(dev);
+       } else {
+               netif_tx_stop_all_queues(dev);
+               netif_carrier_off(dev);
+               mvpp2_ingress_disable(port);
+               mvpp2_egress_disable(port);
+
+               mvpp2_interrupts_disable(port);
+       }
+
+handled:
+       mvpp22_gop_unmask_irq(port);
+       return IRQ_HANDLED;
+}
+
+static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
+                                  struct phy_device *phydev)
+{
+       u32 val;
+
+       if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
+           port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
+           port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+           port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+           port->phy_interface != PHY_INTERFACE_MODE_SGMII)
+               return;
+
+       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
+                MVPP2_GMAC_CONFIG_GMII_SPEED |
+                MVPP2_GMAC_CONFIG_FULL_DUPLEX |
+                MVPP2_GMAC_AN_SPEED_EN |
+                MVPP2_GMAC_AN_DUPLEX_EN);
+
+       if (phydev->duplex)
+               val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+       if (phydev->speed == SPEED_1000)
+               val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+       else if (phydev->speed == SPEED_100)
+               val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
 /* Adjust link */
 static void mvpp2_link_event(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
-       int status_change = 0;
+       bool link_reconfigured = false;
        u32 val;
 
        if (phydev->link) {
+               if (port->phy_interface != phydev->interface && port->comphy) {
+                       /* disable current port for reconfiguration */
+                       mvpp2_interrupts_disable(port);
+                       netif_carrier_off(port->dev);
+                       mvpp2_port_disable(port);
+                       phy_power_off(port->comphy);
+
+                       /* comphy reconfiguration */
+                       port->phy_interface = phydev->interface;
+                       mvpp22_comphy_init(port);
+
+                       /* gop/mac reconfiguration */
+                       mvpp22_gop_init(port);
+                       mvpp2_port_mii_set(port);
+
+                       link_reconfigured = true;
+               }
+
                if ((port->speed != phydev->speed) ||
                    (port->duplex != phydev->duplex)) {
-                       u32 val;
-
-                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-                       val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
-                                MVPP2_GMAC_CONFIG_GMII_SPEED |
-                                MVPP2_GMAC_CONFIG_FULL_DUPLEX |
-                                MVPP2_GMAC_AN_SPEED_EN |
-                                MVPP2_GMAC_AN_DUPLEX_EN);
-
-                       if (phydev->duplex)
-                               val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
-                       if (phydev->speed == SPEED_1000)
-                               val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
-                       else if (phydev->speed == SPEED_100)
-                               val |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
-                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       mvpp2_gmac_set_autoneg(port, phydev);
 
                        port->duplex = phydev->duplex;
                        port->speed  = phydev->speed;
                }
        }
 
-       if (phydev->link != port->link) {
-               if (!phydev->link) {
-                       port->duplex = -1;
-                       port->speed = 0;
-               }
-
+       if (phydev->link != port->link || link_reconfigured) {
                port->link = phydev->link;
-               status_change = 1;
-       }
 
-       if (status_change) {
                if (phydev->link) {
-                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-                       val |= (MVPP2_GMAC_FORCE_LINK_PASS |
-                               MVPP2_GMAC_FORCE_LINK_DOWN);
-                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+                           port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+                           port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+                           port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+                           port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+                               val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                               val |= (MVPP2_GMAC_FORCE_LINK_PASS |
+                                       MVPP2_GMAC_FORCE_LINK_DOWN);
+                               writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       }
+
+                       mvpp2_interrupts_enable(port);
+                       mvpp2_port_enable(port);
+
                        mvpp2_egress_enable(port);
                        mvpp2_ingress_enable(port);
+                       netif_carrier_on(dev);
+                       netif_tx_wake_all_queues(dev);
                } else {
+                       port->duplex = -1;
+                       port->speed = 0;
+
+                       netif_tx_stop_all_queues(dev);
+                       netif_carrier_off(dev);
                        mvpp2_ingress_disable(port);
                        mvpp2_egress_disable(port);
+
+                       mvpp2_port_disable(port);
+                       mvpp2_interrupts_disable(port);
                }
+
                phy_print_status(phydev);
        }
 }
@@ -5411,7 +5990,7 @@ static void mvpp2_tx_proc_cb(unsigned long data)
 
        /* Process all the Tx queues */
        cause = (1 << port->ntxqs) - 1;
-       tx_todo = mvpp2_tx_done(port, cause);
+       tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
 
        /* Set the timer in case not all the packets were processed */
        if (tx_todo)
@@ -5690,6 +6269,123 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
        return -ENOMEM;
 }
 
+static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
+                                    struct net_device *dev,
+                                    struct mvpp2_tx_queue *txq,
+                                    struct mvpp2_tx_queue *aggr_txq,
+                                    struct mvpp2_txq_pcpu *txq_pcpu,
+                                    int hdr_sz)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+       dma_addr_t addr;
+
+       mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+       mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
+
+       addr = txq_pcpu->tso_headers_dma +
+              txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+       mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
+       mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);
+
+       mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
+                                           MVPP2_TXD_F_DESC |
+                                           MVPP2_TXD_PADDING_DISABLE);
+       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+}
+
+static inline int mvpp2_tso_put_data(struct sk_buff *skb,
+                                    struct net_device *dev, struct tso_t *tso,
+                                    struct mvpp2_tx_queue *txq,
+                                    struct mvpp2_tx_queue *aggr_txq,
+                                    struct mvpp2_txq_pcpu *txq_pcpu,
+                                    int sz, bool left, bool last)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+       dma_addr_t buf_dma_addr;
+
+       mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+       mvpp2_txdesc_size_set(port, tx_desc, sz);
+
+       buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
+                                     DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+               mvpp2_txq_desc_put(txq);
+               return -ENOMEM;
+       }
+
+       mvpp2_txdesc_offset_set(port, tx_desc,
+                               buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+       mvpp2_txdesc_dma_addr_set(port, tx_desc,
+                                 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
+
+       if (!left) {
+               mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
+               if (last) {
+                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+                       return 0;
+               }
+       } else {
+               mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+       }
+
+       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+       return 0;
+}
+
+static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
+                       struct mvpp2_tx_queue *txq,
+                       struct mvpp2_tx_queue *aggr_txq,
+                       struct mvpp2_txq_pcpu *txq_pcpu)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct tso_t tso;
+       int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int i, len, descs = 0;
+
+       /* Check number of available descriptors */
+       if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
+                                     tso_count_descs(skb)) ||
+           mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+                                            tso_count_descs(skb)))
+               return 0;
+
+       tso_start(skb, &tso);
+       len = skb->len - hdr_sz;
+       while (len > 0) {
+               int left = min_t(int, skb_shinfo(skb)->gso_size, len);
+               char *hdr = txq_pcpu->tso_headers +
+                           txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+
+               len -= left;
+               descs++;
+
+               tso_build_hdr(skb, hdr, &tso, left, len == 0);
+               mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
+
+               while (left > 0) {
+                       int sz = min_t(int, tso.size, left);
+                       left -= sz;
+                       descs++;
+
+                       if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
+                                              txq_pcpu, sz, left, len == 0))
+                               goto release;
+                       tso_build_data(skb, &tso, sz);
+               }
+       }
+
+       return descs;
+
+release:
+       for (i = descs - 1; i >= 0; i--) {
+               struct mvpp2_tx_desc *tx_desc = txq->descs + i;
+               tx_desc_unmap_put(port, txq, tx_desc);
+       }
+       return 0;
+}
+
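
[Editorial example, not from the patch: to make the TSO loop above concrete, consider a TCP skb with a 54-byte header (hdr_sz) and 5792 bytes of payload with gso_size = 1448. len starts at 5792 and the outer loop runs four times; each iteration copies a fresh header into the per-CPU tso_headers area via tso_build_hdr() and emits one header descriptor (mvpp2_tso_put_hdr), then the inner loop emits one or more data descriptors covering that segment's 1448 bytes. In total the frame consumes tso_count_descs(skb) descriptors, and only the very last data descriptor carries the skb pointer, so the skb is freed once the whole burst has been transmitted.]
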
 /* Main tx processing */
 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 {
@@ -5707,6 +6403,10 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
        txq_pcpu = this_cpu_ptr(txq->pcpu);
        aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
 
+       if (skb_is_gso(skb)) {
+               frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
+               goto out;
+       }
        frags = skb_shinfo(skb)->nr_frags + 1;
 
        /* Check number of available descriptors */
@@ -5756,22 +6456,21 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       txq_pcpu->reserved_num -= frags;
-       txq_pcpu->count += frags;
-       aggr_txq->count += frags;
-
-       /* Enable transmit */
-       wmb();
-       mvpp2_aggr_txq_pend_desc_add(port, frags);
-
-       if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
-               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
-
-               netif_tx_stop_queue(nq);
-       }
 out:
        if (frags > 0) {
                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               txq_pcpu->reserved_num -= frags;
+               txq_pcpu->count += frags;
+               aggr_txq->count += frags;
+
+               /* Enable transmit */
+               wmb();
+               mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+               if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
+                       netif_tx_stop_queue(nq);
 
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
@@ -5787,7 +6486,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                mvpp2_txq_done(port, txq, txq_pcpu);
 
        /* Set the timer in case not all frags were processed */
-       if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+       if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
+           txq_pcpu->count > 0) {
                struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
 
                mvpp2_timer_set(port_pcpu);
@@ -5808,7 +6508,7 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
 
 static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-       u32 cause_rx_tx, cause_rx, cause_misc;
+       u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
        int rx_done = 0;
        struct mvpp2_port *port = netdev_priv(napi->dev);
        struct mvpp2_queue_vector *qv;
@@ -5826,11 +6526,10 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
         *
         * Each CPU has its own Rx/Tx cause register
         */
-       cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+       cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
                                        MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-       cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
-       cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
+       cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
        if (cause_misc) {
                mvpp2_cause_error(port->dev, cause_misc);
 
@@ -5841,9 +6540,15 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
                                   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       if (cause_tx) {
+               cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+               mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+       }
 
        /* Process RX packets */
+       cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+       cause_rx <<= qv->first_rxq;
        cause_rx |= qv->pending_cause_rx;
        while (cause_rx && budget > 0) {
                int count;
@@ -5881,7 +6586,13 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
        struct net_device *ndev = port->dev;
        int i;
 
-       mvpp2_gmac_max_rx_size_set(port);
+       if (port->gop_id == 0 &&
+           (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+            port->phy_interface == PHY_INTERFACE_MODE_10GKR))
+               mvpp2_xlg_max_rx_size_set(port);
+       else
+               mvpp2_gmac_max_rx_size_set(port);
+
        mvpp2_txp_max_tx_size_set(port);
 
        for (i = 0; i < port->nqvecs; i++)
@@ -5890,8 +6601,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
        /* Enable interrupts on all CPUs */
        mvpp2_interrupts_enable(port);
 
+       if (port->priv->hw_version == MVPP22) {
+               mvpp22_comphy_init(port);
+               mvpp22_gop_init(port);
+       }
+
+       mvpp2_port_mii_set(port);
        mvpp2_port_enable(port);
-       phy_start(ndev->phydev);
+       if (ndev->phydev)
+               phy_start(ndev->phydev);
        netif_tx_start_all_queues(port->dev);
 }
 
@@ -5917,7 +6635,9 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
 
        mvpp2_egress_disable(port);
        mvpp2_port_disable(port);
-       phy_stop(ndev->phydev);
+       if (ndev->phydev)
+               phy_stop(ndev->phydev);
+       phy_power_off(port->comphy);
 }
 
 static int mvpp2_check_ringparam_valid(struct net_device *dev,
@@ -5973,6 +6693,10 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
 {
        struct phy_device *phy_dev;
 
+       /* No PHY is attached */
+       if (!port->phy_node)
+               return 0;
+
        phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
                                 port->phy_interface);
        if (!phy_dev) {
@@ -5993,6 +6717,9 @@ static void mvpp2_phy_disconnect(struct mvpp2_port *port)
 {
        struct net_device *ndev = port->dev;
 
+       if (!ndev->phydev)
+               return;
+
        phy_disconnect(ndev->phydev);
 }
 
@@ -6006,6 +6733,10 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
                err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
                if (err)
                        goto err;
+
+               if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+                       irq_set_affinity_hint(qv->irq,
+                                             cpumask_of(qv->sw_thread_id));
        }
 
        return 0;
@@ -6013,6 +6744,7 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
        for (i = 0; i < port->nqvecs; i++) {
                struct mvpp2_queue_vector *qv = port->qvecs + i;
 
+               irq_set_affinity_hint(qv->irq, NULL);
                free_irq(qv->irq, qv);
        }
 
@@ -6026,6 +6758,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
        for (i = 0; i < port->nqvecs; i++) {
                struct mvpp2_queue_vector *qv = port->qvecs + i;
 
+               irq_set_affinity_hint(qv->irq, NULL);
                free_irq(qv->irq, qv);
        }
 }
@@ -6033,6 +6766,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
 static int mvpp2_open(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2 *priv = port->priv;
        unsigned char mac_bcast[ETH_ALEN] = {
                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        int err;
@@ -6078,20 +6812,36 @@ static int mvpp2_open(struct net_device *dev)
                goto err_cleanup_txqs;
        }
 
+       if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
+               err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
+                                 dev->name, port);
+               if (err) {
+                       netdev_err(port->dev, "cannot request link IRQ %d\n",
+                                  port->link_irq);
+                       goto err_free_irq;
+               }
+
+               mvpp22_gop_setup_irq(port);
+       }
+
        /* By default, the link is down */
        netif_carrier_off(port->dev);
 
        err = mvpp2_phy_connect(port);
        if (err < 0)
-               goto err_free_irq;
+               goto err_free_link_irq;
 
        /* Unmask interrupts on all CPUs */
        on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+       mvpp2_shared_interrupt_mask_unmask(port, false);
 
        mvpp2_start_dev(port);
 
        return 0;
 
+err_free_link_irq:
+       if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
+               free_irq(port->link_irq, port);
 err_free_irq:
        mvpp2_irqs_deinit(port);
 err_cleanup_txqs:
@@ -6105,6 +6855,7 @@ static int mvpp2_stop(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
        struct mvpp2_port_pcpu *port_pcpu;
+       struct mvpp2 *priv = port->priv;
        int cpu;
 
        mvpp2_stop_dev(port);
@@ -6112,14 +6863,20 @@ static int mvpp2_stop(struct net_device *dev)
 
        /* Mask interrupts on all CPUs */
        on_each_cpu(mvpp2_interrupts_mask, port, 1);
+       mvpp2_shared_interrupt_mask_unmask(port, true);
+
+       if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
+               free_irq(port->link_irq, port);
 
        mvpp2_irqs_deinit(port);
-       for_each_present_cpu(cpu) {
-               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
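+       /* The tx-done hrtimer/tasklet is only used without per-CPU TX IRQs */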
+       if (!port->has_tx_irqs) {
+               for_each_present_cpu(cpu) {
+                       port_pcpu = per_cpu_ptr(port->pcpu, cpu);
 
-               hrtimer_cancel(&port_pcpu->tx_done_timer);
-               port_pcpu->timer_scheduled = false;
-               tasklet_kill(&port_pcpu->tx_done_tasklet);
+                       hrtimer_cancel(&port_pcpu->tx_done_timer);
+                       port_pcpu->timer_scheduled = false;
+                       tasklet_kill(&port_pcpu->tx_done_tasklet);
+               }
        }
        mvpp2_cleanup_rxqs(port);
        mvpp2_cleanup_txqs(port);
@@ -6303,10 +7060,18 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                mvpp2_rx_time_coal_set(port, rxq);
        }
 
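+       /* With TX IRQs, time-based TX coalescing is programmed in hardware */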
+       if (port->has_tx_irqs) {
+               port->tx_time_coal = c->tx_coalesce_usecs;
+               mvpp2_tx_time_coal_set(port);
+       }
+
        for (queue = 0; queue < port->ntxqs; queue++) {
                struct mvpp2_tx_queue *txq = port->txqs[queue];
 
                txq->done_pkts_coal = c->tx_max_coalesced_frames;
+
+               if (port->has_tx_irqs)
+                       mvpp2_tx_pkts_coal_set(port, txq);
        }
 
        return 0;
@@ -6431,8 +7196,11 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
-                                   struct device_node *port_node)
+/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
+ * had a single IRQ defined per port.
+ */
+static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
+                                          struct device_node *port_node)
 {
        struct mvpp2_queue_vector *v = &port->qvecs[0];
 
@@ -6453,6 +7221,66 @@ static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
        return 0;
 }
 
+static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
+                                         struct device_node *port_node)
+{
+       struct mvpp2_queue_vector *v;
+       int i, ret;
+
+       port->nqvecs = num_possible_cpus();
+       if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+               port->nqvecs += 1;
+
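+       /* One private vector per CPU; single mode adds one shared RX vector */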
+       for (i = 0; i < port->nqvecs; i++) {
+               char irqname[16];
+
+               v = port->qvecs + i;
+
+               v->port = port;
+               v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
+               v->sw_thread_id = i;
+               v->sw_thread_mask = BIT(i);
+
+               snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+
+               if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
+                       v->first_rxq = i * MVPP2_DEFAULT_RXQ;
+                       v->nrxqs = MVPP2_DEFAULT_RXQ;
+               } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
+                          i == (port->nqvecs - 1)) {
+                       v->first_rxq = 0;
+                       v->nrxqs = port->nrxqs;
+                       v->type = MVPP2_QUEUE_VECTOR_SHARED;
+                       strncpy(irqname, "rx-shared", sizeof(irqname));
+               }
+
+               v->irq = of_irq_get_byname(port_node, irqname);
+               if (v->irq <= 0) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               netif_napi_add(port->dev, &v->napi, mvpp2_poll,
+                              NAPI_POLL_WEIGHT);
+       }
+
+       return 0;
+
+err:
+       for (i = 0; i < port->nqvecs; i++)
+               irq_dispose_mapping(port->qvecs[i].irq);
+       return ret;
+}
+
+static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
+                                   struct device_node *port_node)
+{
+       if (port->has_tx_irqs)
+               return mvpp2_multi_queue_vectors_init(port, port_node);
+       else
+               return mvpp2_simple_queue_vectors_init(port, port_node);
+}
+
 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
 {
        int i;
@@ -6512,6 +7340,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        mvpp2_egress_disable(port);
        mvpp2_port_disable(port);
 
+       port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+
        port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
                                  GFP_KERNEL);
        if (!port->txqs)
@@ -6611,12 +7441,37 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        return err;
 }
 
+/* Checks whether the port DT description provides the TX interrupts.
+ * On PPv2.1, there are no such interrupts. On PPv2.2, they are
+ * available, but we need to keep support for old DTs.
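+ * The check below looks for the "rx-shared" and "tx-cpu0".."tx-cpu3"
+ * entries in the "interrupt-names" property.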
+ */
+static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
+                                  struct device_node *port_node)
+{
+       char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
+                         "tx-cpu2", "tx-cpu3" };
+       int ret, i;
+
+       if (priv->hw_version == MVPP21)
+               return false;
+
+       for (i = 0; i < 5; i++) {
+               ret = of_property_match_string(port_node, "interrupt-names",
+                                              irqs[i]);
+               if (ret < 0)
+                       return false;
+       }
+
+       return true;
+}
+
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
                            struct device_node *port_node,
                            struct mvpp2 *priv)
 {
        struct device_node *phy_node;
+       struct phy *comphy;
        struct mvpp2_port *port;
        struct mvpp2_port_pcpu *port_pcpu;
        struct net_device *dev;
@@ -6625,25 +7480,28 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        const char *mac_from;
        char hw_mac_addr[ETH_ALEN];
        unsigned int ntxqs, nrxqs;
+       bool has_tx_irqs;
        u32 id;
        int features;
        int phy_mode;
        int err, i, cpu;
 
+       has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+
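+       /* Without per-CPU TX IRQs, fall back to single queue-distribution mode */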
+       if (!has_tx_irqs)
+               queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
        ntxqs = MVPP2_MAX_TXQ;
-       nrxqs = MVPP2_DEFAULT_RXQ;
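+       /* In multi-queue mode, each CPU gets its own group of RX queues */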
+       if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
+               nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
+       else
+               nrxqs = MVPP2_DEFAULT_RXQ;
 
        dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
        if (!dev)
                return -ENOMEM;
 
        phy_node = of_parse_phandle(port_node, "phy", 0);
-       if (!phy_node) {
-               dev_err(&pdev->dev, "missing phy\n");
-               err = -ENODEV;
-               goto err_free_netdev;
-       }
-
        phy_mode = of_get_phy_mode(port_node);
        if (phy_mode < 0) {
                dev_err(&pdev->dev, "incorrect phy mode\n");
@@ -6651,6 +7509,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                goto err_free_netdev;
        }
 
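+       /* The comphy (generic PHY) is optional; only -EPROBE_DEFER is fatal */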
+       comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+       if (IS_ERR(comphy)) {
+               if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+                       err = -EPROBE_DEFER;
+                       goto err_free_netdev;
+               }
+               comphy = NULL;
+       }
+
        if (of_property_read_u32(port_node, "port-id", &id)) {
                err = -EINVAL;
                dev_err(&pdev->dev, "missing port-id value\n");
@@ -6666,15 +7533,25 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        port->dev = dev;
        port->ntxqs = ntxqs;
        port->nrxqs = nrxqs;
+       port->priv = priv;
+       port->has_tx_irqs = has_tx_irqs;
 
        err = mvpp2_queue_vectors_init(port, port_node);
        if (err)
                goto err_free_netdev;
 
+       port->link_irq = of_irq_get_byname(port_node, "link");
+       if (port->link_irq == -EPROBE_DEFER) {
+               err = -EPROBE_DEFER;
+               goto err_deinit_qvecs;
+       }
+       if (port->link_irq <= 0)
+               /* the link irq is optional */
+               port->link_irq = 0;
+
        if (of_property_read_bool(port_node, "marvell,loopback"))
                port->flags |= MVPP2_F_LOOPBACK;
 
-       port->priv = priv;
        port->id = id;
        if (priv->hw_version == MVPP21)
                port->first_rxq = port->id * port->nrxqs;
@@ -6683,13 +7560,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 
        port->phy_node = phy_node;
        port->phy_interface = phy_mode;
+       port->comphy = comphy;
 
        if (priv->hw_version == MVPP21) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
                port->base = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(port->base)) {
                        err = PTR_ERR(port->base);
-                       goto err_deinit_qvecs;
+                       goto err_free_irq;
                }
        } else {
                if (of_property_read_u32(port_node, "gop-port-id",
@@ -6706,7 +7584,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
        if (!port->stats) {
                err = -ENOMEM;
-               goto err_deinit_qvecs;
+               goto err_free_irq;
        }
 
        dt_mac_addr = of_get_mac_address(port_node);
@@ -6735,7 +7613,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                goto err_free_stats;
        }
 
-       mvpp2_port_mii_set(port);
        mvpp2_port_periodic_xon_disable(port);
 
        if (priv->hw_version == MVPP21)
@@ -6749,19 +7626,22 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                goto err_free_txq_pcpu;
        }
 
-       for_each_present_cpu(cpu) {
-               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+       if (!port->has_tx_irqs) {
+               for_each_present_cpu(cpu) {
+                       port_pcpu = per_cpu_ptr(port->pcpu, cpu);
 
-               hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
-                            HRTIMER_MODE_REL_PINNED);
-               port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
-               port_pcpu->timer_scheduled = false;
+                       hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+                                    HRTIMER_MODE_REL_PINNED);
+                       port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+                       port_pcpu->timer_scheduled = false;
 
-               tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
-                            (unsigned long)dev);
+                       tasklet_init(&port_pcpu->tx_done_tasklet,
+                                    mvpp2_tx_proc_cb,
+                                    (unsigned long)dev);
+               }
        }
 
-       features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
        dev->features = features | NETIF_F_RXCSUM;
        dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
        dev->vlan_features |= features;
@@ -6788,6 +7668,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                free_percpu(port->txqs[i]->pcpu);
 err_free_stats:
        free_percpu(port->stats);
+err_free_irq:
+       if (port->link_irq)
+               irq_dispose_mapping(port->link_irq);
 err_deinit_qvecs:
        mvpp2_queue_vectors_deinit(port);
 err_free_netdev:
@@ -6808,6 +7691,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
        for (i = 0; i < port->ntxqs; i++)
                free_percpu(port->txqs[i]->pcpu);
        mvpp2_queue_vectors_deinit(port);
+       if (port->link_irq)
+               irq_dispose_mapping(port->link_irq);
        free_netdev(port->dev);
 }
 
@@ -6951,8 +7836,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
        for_each_present_cpu(i) {
                priv->aggr_txqs[i].id = i;
                priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
-               err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
-                                         MVPP2_AGGR_TXQ_SIZE, i, priv);
+               err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
                if (err < 0)
                        return err;
        }
@@ -7015,6 +7899,17 @@ static int mvpp2_probe(struct platform_device *pdev)
                priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(priv->iface_base))
                        return PTR_ERR(priv->iface_base);
+
+               priv->sysctrl_base =
+                       syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                       "marvell,system-controller");
+               if (IS_ERR(priv->sysctrl_base))
+                       /* The system controller regmap is optional for DT
+                        * compatibility reasons. When not provided, the
+                        * configuration of the GoP relies on the
+                        * firmware/bootloader.
+                        */
+                       priv->sysctrl_base = NULL;
        }
 
        for (i = 0; i < MVPP2_MAX_THREADS; i++) {