asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge tag 'riscv-for-linus-4.19-rc7' of git://git.kernel.org/pub/scm/linux/kernel...
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Oct 2018 16:16:11 +0000 (09:16 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Oct 2018 16:16:11 +0000 (09:16 -0700)
Palmer writes:
  "A Single RISC-V Fix for 4.19-rc7

   This tag contains a single patch that managed to get lost in the
   shuffle, which explains why it's so late.  This single line has been
   floating around in various patch sets for months, and fixes our DMA32
   region."

* tag 'riscv-for-linus-4.19-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux:
  RISCV: Fix end PFN for low memory
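
The one-line fix itself is not reproduced in the hunks below (those belong to the surrounding merge window). As a hedged sketch only -- not the actual commit text -- the end PFN of low memory is normally derived from memblock's view of DRAM and stored as a page frame number rather than a byte address, which is what the DMA32 zone sizing depends on (helper names here are the generic kernel ones, assumed to apply):

    /* Illustrative sketch, not the RISC-V patch text. */
    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/bootmem.h>
    #include <linux/mm.h>
    #include <linux/pfn.h>

    static void __init setup_low_mem_bounds(void)
    {
            /* End of physical memory as reported by memblock, in bytes. */
            phys_addr_t end = memblock_end_of_DRAM();

            /*
             * max_low_pfn must hold a page frame number, not a byte
             * address, otherwise zone boundaries such as ZONE_DMA32
             * come out wrong.
             */
            max_low_pfn = PFN_DOWN(end);
            set_max_mapnr(max_low_pfn);
    }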

155 files changed:
Documentation/fb/uvesafb.txt
Documentation/networking/ip-sysctl.txt
MAINTAINERS
arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm64/kvm/guest.c
arch/arm64/mm/hugetlbpage.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-fh.c
drivers/mmc/core/host.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9800.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/control.c
drivers/net/wireless/broadcom/b43/dma.c
drivers/net/wireless/intel/iwlwifi/cfg/1000.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/hash.c
drivers/net/xen-netback/interface.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/soc/fsl/qbman/qman.c
drivers/soc/fsl/qe/ucc.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/stifb.c
fs/pstore/ram.c
fs/xattr.c
include/linux/mlx5/transobj.h
include/linux/netdevice.h
include/linux/netfilter.h
include/media/v4l2-fh.h
include/net/bonding.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/netlink.h
include/trace/events/rxrpc.h
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_netfilter_hooks.c
net/core/ethtool.c
net/core/netpoll.c
net/core/rtnetlink.c
net/dccp/input.c
net/dccp/ipv4.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_sockglue.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_transport.c
net/ipv6/route.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_output.c
net/mac80211/iface.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nft_osf.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_socket.c
net/openvswitch/conntrack.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/protocol.h
net/sched/act_ipt.c
net/sctp/outqueue.c
net/tipc/bearer.c
net/tipc/link.c
net/tipc/link.h
net/tipc/node.c
net/tipc/socket.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
tools/testing/selftests/rseq/param_test.c

diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index f6362d88763b852e0704321af6d22653b95cc4d6..aa924196c36603abcc4d72a619ebce5635050ca4 100644 (file)
@@ -15,7 +15,8 @@ than x86.  Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
  Documentation of the uvesafb options is loosely based on vesafb.txt.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8313a636dd533540172859653bcfa173c1e03864..960de8fe3f401c7ce4ceee0d5d3d61cb46102319 100644 (file)
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
        Controls how often to start TCP Packetization-Layer Path MTU
        Discovery reprobe. The default is reprobing every 10 minutes as
        per RFC4821.
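
The tcp_probe_interval knob documented in the hunk above is exposed under /proc/sys/net/ipv4/. As a hedged usage sketch (the 300-second value is only an example), the interval can be written directly from a small program run as root:

    /* Hedged example: adjust net.ipv4.tcp_probe_interval through procfs. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_probe_interval", "w");

            if (!f) {
                    perror("tcp_probe_interval");
                    return 1;
            }
            /* Reprobe the path MTU every 5 minutes instead of the
             * RFC 4821 default of 10 minutes (600 seconds). */
            fprintf(f, "%u\n", 300U);
            return fclose(f) ? 1 : 0;
    }
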
diff --git a/MAINTAINERS b/MAINTAINERS
index b22e7fdfd2ea95c8188e8d6a08aa3050b32b6a79..22065048d89da12230b3c5a7260f02eaf8d77101 100644 (file)
@@ -1251,7 +1251,7 @@ N:        meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+M:     Antoine Tenart <antoine.tenart@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
@@ -2956,7 +2956,6 @@ F:        include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -2977,6 +2976,7 @@ F:        drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Ariel Elior <ariel.elior@cavium.com>
+M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -5470,7 +5470,8 @@ S:        Odd Fixes
 F:     drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <stephen@networkplumber.org>
+M:     Roopa Prabhu <roopa@cumulusnetworks.com>
+M:     Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -11979,7 +11980,7 @@ F:      Documentation/scsi/LICENSE.qla4xxx
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
+M:     Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -11987,7 +11988,6 @@ S:      Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -15395,7 +15395,7 @@ S:      Maintained
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
-W:     http://dev.gentoo.org/~spock/projects/uvesafb/
+W:     https://github.com/mjanusz/v86d
 S:     Maintained
 F:     Documentation/fb/uvesafb.txt
 F:     drivers/video/fbdev/uvesafb.*
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index b10dccd0958f335ce3d874aa8d9eb171336a882a..3b1baa8605a77e8f724724550e5ec123df608732 100644 (file)
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
        model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@ re_we_data {
                                                         <PIN_PA30__NWE_NANDWE>,
                                                         <PIN_PB2__NRD_NANDOE>;
                                                bias-pull-up;
+                                               atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
                                        };
 
                                        ale_cle_rdy_cs {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 43ee992ccdcf70230cf1f50a33c3c51a6b483f2f..6df61518776f7e45ef8a290fd1920ab675ca649c 100644 (file)
@@ -106,21 +106,23 @@ gic: interrupt-controller@1e100 {
                global_timer: timer@1e200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x1e200 0x20>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&axi_clk>;
                };
 
                local_timer: local-timer@1e600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x1e600 0x20>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&axi_clk>;
                };
 
                twd_watchdog: watchdog@1e620 {
                        compatible = "arm,cortex-a9-twd-wdt";
                        reg = <0x1e620 0x20>;
-                       interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_LEVEL_HIGH)>;
                };
 
                armpll: armpll {
@@ -158,7 +160,7 @@ timer: timer@80 {
                serial0: serial@600 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x600 0x1b>;
-                       interrupts = <GIC_SPI 32 0>;
+                       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
@@ -167,7 +169,7 @@ serial0: serial@600 {
                serial1: serial@620 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x620 0x1b>;
-                       interrupts = <GIC_SPI 33 0>;
+                       interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
@@ -180,7 +182,7 @@ nand: nand@2000 {
                        reg = <0x2000 0x600>, <0xf0 0x10>;
                        reg-names = "nand", "nand-int-base";
                        status = "disabled";
-                       interrupts = <GIC_SPI 38 0>;
+                       interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "nand";
                };
 
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 661be948ab7424759ebfdb2d1c780822d17f38b0..185541a5b69fb58127136284f86341845b963af3 100644 (file)
@@ -1078,8 +1078,8 @@ spi6: spi@5c001000 {
                        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&rcc SPI6_K>;
                        resets = <&rcc SPI6_R>;
-                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-                              <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+                              <&mdma1 35 0x0 0x40002 0x0 0x0>;
                        dma-names = "rx", "tx";
                        status = "disabled";
                };
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index ffd9f00f74a46da89d88040db9178c8ef01a37d4..5f547c161bafd23a3054b6c084599955d753314b 100644 (file)
@@ -800,8 +800,7 @@ hdmi_out: port@1 {
                };
 
                hdmi_phy: hdmi-phy@1ef0000 {
-                       compatible = "allwinner,sun8i-r40-hdmi-phy",
-                                    "allwinner,sun50i-a64-hdmi-phy";
+                       compatible = "allwinner,sun8i-r40-hdmi-phy";
                        reg = <0x01ef0000 0x10000>;
                        clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
                                 <&ccu 7>, <&ccu 16>;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 07256b08226c0c935d7ced6530a4a7a85ee4c276..a6c9fbaeaefcdd71d0ea70c8eeb89c55692f8b66 100644 (file)
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-               u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+               u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
+                       if (!system_supports_32bit_el0())
+                               return -EINVAL;
+                       break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
+                       if (!vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
+                       break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
+                       if (vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 192b3ba070755f70d41f13d3c68eaa18b2b7f17d..f58ea503ad014fda52fbab06e6edc743551a4b6c 100644 (file)
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
-                * the dirty bit for any page in the set, so check
-                * them all.  All hugetlb entries are already young.
+                * the dirty or accessed bit for any page in the set,
+                * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);
+
+               if (pte_young(pte))
+                       orig_pte = pte_mkyoung(orig_pte);
        }
 
        if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+       int i;
+
+       if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+               return 1;
+
+       for (i = 0; i < ncontig; i++) {
+               pte_t orig_pte = huge_ptep_get(ptep + i);
+
+               if (pte_dirty(pte) != pte_dirty(orig_pte))
+                       return 1;
+
+               if (pte_young(pte) != pte_young(orig_pte))
+                       return 1;
+       }
+
+       return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
 {
-       int ncontig, i, changed = 0;
+       int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
+       if (!__cont_access_flags_changed(ptep, pte, ncontig))
+               return 0;
+
        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-       if (!pte_same(orig_pte, pte))
-               changed = 1;
 
-       /* Make sure we don't lose the dirty state */
+       /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);
 
+       if (pte_young(orig_pte))
+               pte = pte_mkyoung(pte);
+
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-       return changed;
+       return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5146ee029db4bd6c35bb3a15cdd25b6caf54c64e..bc49909aba8e664b6675fcac13921128c661dce9 100644 (file)
 #define USB_DEVICE_ID_SIS817_TOUCH     0x0817
 #define USB_DEVICE_ID_SIS_TS           0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH    0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH    0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE                 0x1223
 #define        USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER       0x3F07
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f3076659361abcb0567804c298af25e278de8fa9..4e3592e7a3f7217f86fe0fba59d3ea73551dcab2 100644 (file)
@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR      BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       pm_runtime_put(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_put(&client->dev);
+
        return 0;
 
 err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       pm_runtime_get_sync(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_get_sync(&client->dev);
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
        pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
 
        /* Instead of resetting device, simply powers the device on. This
         * solves "incomplete reports" on Raydium devices 2386:3118 and
-        * 2386:4B33
+        * 2386:4B33 and fixes various SIS touchscreens no longer sending
+        * data after a suspend/resume.
         */
        ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* Some devices need to re-send report descr cmd
-        * after resume, after this it will be back normal.
-        * otherwise it issues too many incomplete reports.
-        */
-       if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-               ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-               if (ret)
-                       return ret;
-       }
-
        if (hid->driver && hid->driver->reset_resume) {
                ret = hid->driver->reset_resume(hid);
                return ret;
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index da133716bed05b63dadef22e072f05b4e7b0f5da..08a8327dfd224852cb81599959eeac09fd0c5a9a 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define ICL_MOBILE_DEVICE_ID   0x34FC
 #define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a1125a5c7965a255f8b5480f47cc8a53b534b76f..256b3016116cecca6ece2f8ae2d94422cd875251 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9832289124a399b03b2d6c88715c3..a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56 100644 (file)
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
        if (sev == NULL)
                return;
 
-       /*
-        * If the event has been added to the fh->subscribed list, but its
-        * add op has not completed yet elems will be 0, treat this as
-        * not being subscribed.
-        */
-       if (!sev->elems)
-               return;
-
        /* Increase event sequence number on fh. */
        fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
+       int ret = 0;
 
        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
+       sev->elems = elems;
+
+       mutex_lock(&fh->subscribe_lock);
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (!found_ev)
-               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
+               /* Already listening */
                kvfree(sev);
-               return 0; /* Already listening */
+               goto out_unlock;
        }
 
        if (sev->ops && sev->ops->add) {
-               int ret = sev->ops->add(sev, elems);
+               ret = sev->ops->add(sev, elems);
                if (ret) {
-                       sev->ops = NULL;
-                       v4l2_event_unsubscribe(fh, sub);
-                       return ret;
+                       kvfree(sev);
+                       goto out_unlock;
                }
        }
 
-       /* Mark as ready for use */
-       sev->elems = elems;
+       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+       list_add(&sev->list, &fh->subscribed);
+       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&fh->subscribe_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                return 0;
        }
 
+       mutex_lock(&fh->subscribe_lock);
+
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);
 
+       mutex_unlock(&fh->subscribe_lock);
+
        kvfree(sev);
 
        return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf8805c3c208a4f042cb59b786a6ec44c..c91a7bd3ecfc7d14853b56a8de273d35ac0ff870 100644 (file)
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
        INIT_LIST_HEAD(&fh->available);
        INIT_LIST_HEAD(&fh->subscribed);
        fh->sequence = -1;
+       mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
                return;
        v4l_disable_media_source(fh->vdev);
        v4l2_event_unsubscribe_all(fh);
+       mutex_destroy(&fh->subscribe_lock);
        fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c4cc42edee4113bb1a989167dba55..f57f5de5420647619714c65861896252700d302c 100644 (file)
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-                                          cd_debounce_delay_ms,
+                                          cd_debounce_delay_ms * 1000,
                                           &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b6b459d9744b366ef22cb5ca1279c..86803a3a04dc9609a0c55de2df03f3a9e8cb1341 100644 (file)
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
        if (debounce) {
                ret = gpiod_set_debounce(desc, debounce);
                if (ret < 0)
-                       ctx->cd_debounce_delay_ms = debounce;
+                       ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
        if (gpio_invert)
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbdcc9cb693c2c4159c1b0727cad216..5389c48218820166209a7de463c01084366b1fe4 100644 (file)
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-       if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+       if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+           of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
            !soc_device_match(gen3_soc_whitelist))
                return -ENODEV;
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0d87e11e7f1d84537fe43d95249b1bd3a2ce291d..ee28ec9e0abaddd13053da9fda5c0cefc722555d 100644 (file)
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       /* don't change skb->dev for link-local packets */
-       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+       /* Link-local multicast packets should be passed to the
+        * stack on the link they arrive as well as pass them to the
+        * bond-master device. These packets are mostly usable when
+        * stack receives it with the link on which they arrive
+        * (e.g. LLDP) they also must be available on master. Some of
+        * the use cases include (but are not limited to): LLDP agents
+        * that must be able to operate both on enslaved interfaces as
+        * well as on bonds themselves; linux bridges that must be able
+        * to process/pass BPDUs from attached bonds when any kind of
+        * STP version is enabled on the network.
+        */
+       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+               if (nskb) {
+                       nskb->dev = bond->dev;
+                       nskb->queue_mapping = 0;
+                       netif_rx(nskb);
+               }
                return RX_HANDLER_PASS;
+       }
        if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
 
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
                        return NULL;
                }
        }
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
        return slave;
 }
 
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
+       cancel_delayed_work_sync(&slave->notify_work);
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                kfree(SLAVE_AD_INFO(slave));
 
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
        info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-                              struct netdev_bonding_info *info)
-{
-       rtnl_lock();
-       netdev_bonding_info_change(dev, info);
-       rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-       struct netdev_notify_work *w =
-               container_of(_work, struct netdev_notify_work, work.work);
+       struct slave *slave = container_of(_work, struct slave,
+                                          notify_work.work);
+
+       if (rtnl_trylock()) {
+               struct netdev_bonding_info binfo;
 
-       bond_netdev_notify(w->dev, &w->bonding_info);
-       dev_put(w->dev);
-       kfree(w);
+               bond_fill_ifslave(slave, &binfo.slave);
+               bond_fill_ifbond(slave->bond, &binfo.master);
+               netdev_bonding_info_change(slave->dev, &binfo);
+               rtnl_unlock();
+       } else {
+               queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+       }
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-       struct bonding *bond = slave->bond;
-       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-       if (!nnw)
-               return;
-
-       dev_hold(slave->dev);
-       nnw->dev = slave->dev;
-       bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-       bond_fill_ifbond(bond, &nnw->bonding_info.master);
-       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-       queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+       queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 29b5774dd32d47e5ad2c43e78b599493f011d129..25621a218f20754c29963c7ded3075c0c89a232c 100644 (file)
@@ -2185,25 +2185,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-       struct ena_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* Dont schedule NAPI if the driver is in the middle of reset
-        * or netdev is down.
-        */
-
-       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-           test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-               return;
-
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
        .ndo_change_mtu         = ena_change_mtu,
        .ndo_set_mac_address    = NULL,
        .ndo_validate_addr      = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b5930600f57f330b140785f2acd411ad..00332a1ea84b9e770febc38dd69af42e3d5bba90 100644 (file)
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
        int i, ret;
        unsigned long esar_base;
        unsigned char *esar;
+       const char *desc;
 
        if (dec_lance_debug && version_printed++ == 0)
                printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
         */
        switch (type) {
        case ASIC_LANCE:
-               printk("%s: IOASIC onboard LANCE", name);
+               desc = "IOASIC onboard LANCE";
                break;
        case PMAD_LANCE:
-               printk("%s: PMAD-AA", name);
+               desc = "PMAD-AA";
                break;
        case PMAX_LANCE:
-               printk("%s: PMAX onboard LANCE", name);
+               desc = "PMAX onboard LANCE";
                break;
        }
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = esar[i * 4];
 
-       printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+       printk("%s: %s, addr = %pM, irq = %d\n",
+              name, desc, dev->dev_addr, dev->irq);
 
        dev->netdev_ops = &lance_netdev_ops;
        dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 147045757b103309e2656f36bc3337acef7b383f..c57238fce86377b0095b8bfae082b1e776d06c20 100644 (file)
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
-       /* Stop monitoring MPD interrupt */
-       intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        /* Disable RXCHK, active filters and Broadcom tag matching */
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
        /* Clear the MagicPacket detection logic */
        mpd_enable_set(priv, false);
 
+       reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+       if (reg & INTRL2_0_MPD)
+               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+       if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+                                 RXCHK_BRCM_TAG_MATCH_MASK;
+               netdev_info(priv->netdev,
+                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+       }
+
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;
-       u32 reg;
 
        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
-       if (priv->irq0_stat & INTRL2_0_MPD)
-               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-       if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-                                 RXCHK_BRCM_TAG_MATCH_MASK;
-               netdev_info(priv->netdev,
-                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-       }
-
        if (!priv->is_lite)
                goto out;
 
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);
 
-       /* Enable the interrupt wake-up source */
-       intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
        return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61957b0bbd8c9f46773ff26ac9d14759b96c3960..0478e562abac16574e4a0b74c8aec774583557d1 100644 (file)
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
+                               raw_cons = NEXT_RAW_CMP(raw_cons);
+                               break;
+                       }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        if (likely(budget))
                                rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
 
-               if (rx_pkts == budget)
+               if (rx_pkts && rx_pkts == budget)
                        break;
        }
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-               if (work_done >= budget)
+               if (work_done >= budget) {
+                       if (!budget)
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
+               }
 
                if (!bnxt_has_work(bp, cpr)) {
                        if (napi_complete_done(napi, work_done))
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f1a86b42261796865fc659bc042c6763a2393a6f..58b9744c405805d6c4ebf9c1bf5c840cee3ee3d3 100644 (file)
@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
                else
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
+               dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                        dmacfg |= GEM_BIT(ADDR64);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2708297e7795929e798ee73e5f4cc2c1907be12d..bf9b9fd6d2a07c720597fb72d1d6c6091a3369fd 100644 (file)
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
@@ -1273,7 +1273,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (netif_queue_stopped(ndev)) {
+               if (netif_tx_queue_stopped(nq)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
-                       netif_wake_queue(ndev);
+                       netif_tx_wake_all_queues(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                }
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541ad2e2191567b9b3b3d7d69a90fc0..79d03f8ee7b180d2cab9a2a647254461c0a0cb08 100644 (file)
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
-       else
+       else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c961333f20f842a3558a920d201ccc9..28e907831b0eddbf760e0edb579ae7ae708520e0 100644 (file)
@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-                        int size, dma_addr_t dma, int frag_end,
-                        int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+                           int send_sz, dma_addr_t dma, int frag_end,
+                           int buf_num, enum hns_desc_type type, int mtu)
 {
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        desc_cb->type = type;
 
        desc->addr = cpu_to_le64(dma);
-       desc->tx.send_size = cpu_to_le16((u16)size);
+       desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+                        int size, dma_addr_t dma, int frag_end,
+                        int buf_num, enum hns_desc_type type, int mtu)
+{
+       fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+                       buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
        { "HISI00C1", 0 },
        { "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
        /* when the frag size is bigger than hardware, split this frag */
        for (k = 0; k < frag_buf_num; k++)
-               fill_v2_desc(ring, priv,
-                            (k == frag_buf_num - 1) ?
+               fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+                               (k == frag_buf_num - 1) ?
                                        sizeoflast : BD_MAX_SEND_SIZE,
-                            dma + BD_MAX_SEND_SIZE * k,
-                            frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-                            buf_num,
-                            (type == DESC_TYPE_SKB && !k) ?
+                               dma + BD_MAX_SEND_SIZE * k,
+                               frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+                               buf_num,
+                               (type == DESC_TYPE_SKB && !k) ?
                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
-                            mtu);
+                               mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
        return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-       struct hns_nic_priv *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-       for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-               napi_schedule(&priv->ring_data[i].napi);
-       local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_set_features = hns_nic_set_features,
        .ndo_fix_features = hns_nic_fix_features,
        .ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hns_nic_poll_controller,
-#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
        .ndo_select_queue = hns_nic_select_queue,
 };
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786549b6232d8069c4e45857b95fd8c..4a8f82938ed5b87c8da6b09e88e08d387c652f0c 100644 (file)
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
        stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-       struct hinic_dev *nic_dev = netdev_priv(netdev);
-       int i, num_qps;
-
-       num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-       for (i = 0; i < num_qps; i++) {
-               struct hinic_txq *txq = &nic_dev->txqs[i];
-               struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-               napi_schedule(&txq->napi);
-               napi_schedule(&rxq->napi);
-       }
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
        .ndo_open = hinic_open,
        .ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
        .ndo_start_xmit = hinic_xmit_frame,
        .ndo_tx_timeout = hinic_tx_timeout,
        .ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512326d346e96e67718bec10ed716cc..03f64f40b2a3e0a3d9f3432cb6fbdd7bcd264a4c 100644 (file)
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-       struct ehea_port *port = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < port->num_def_qps; i++)
-               napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
        struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_open               = ehea_open,
        .ndo_stop               = ehea_stop,
        .ndo_start_xmit         = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ehea_netpoll,
-#endif
        .ndo_get_stats64        = ehea_get_stats64,
        .ndo_set_mac_address    = ehea_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f0daf67b18df2dcf11d7a406ebc1982e0fee466..699ef942b615c3a22053ba1419399317a6642cfa 100644 (file)
@@ -2207,19 +2207,6 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
        return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(dev);
-       int i;
-
-       replenish_pools(netdev_priv(dev));
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-                                    adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
        int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_set_mac_address    = ibmvnic_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ibmvnic_netpoll_controller,
-#endif
        .ndo_change_mtu         = ibmvnic_change_mtu,
        .ndo_features_check     = ibmvnic_features_check,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f27d73a7bf16f084ea8f4ab12d905b031f514cf8..6cdd58d9d461bd34ab62e5f68a43141f5cd70323 100644 (file)
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete_done(napi, work_done);
-       if (adapter->rx_itr_setting & 1)
-               ixgbe_set_itr(q_vector);
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+       if (likely(napi_complete_done(napi, work_done))) {
+               if (adapter->rx_itr_setting & 1)
+                       ixgbe_set_itr(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter,
+                                               BIT_ULL(q_vector->v_idx));
+       }
 
        return min(work_done, budget - 1);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index db2cfcd21d43c52445ed66438a2b41be53b3f011..0f189f87385923226966a57537b7fa643e540a98 100644 (file)
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78c1322a9e218797114c9483a1a060..1431232c9a09ef1ddecdf1e497142f82544e4de0 100644 (file)
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
        DECLARE_HASHTABLE(mod_hdr_tbl, 8);
        DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+       struct notifier_block     netdevice_nb;
 };
 
 struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 54118b77dc1f6d478c5b08e7c01526dd3e5dc740..f291d1bf15586b9bff40a3cc2ea8c62c71e62f05 100644 (file)
@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fed54017659de3b0f58a1287a7eff605c077f6c..85796727093eec1ddfc3873cc345f2b8d861ea01 100644 (file)
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
+       } else {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+                                             struct mlx5e_priv *peer_priv)
+{
+       struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+       struct mlx5e_hairpin_entry *hpe;
+       u16 peer_vhca_id;
+       int bkt;
+
+       if (!same_hw_devs(priv, peer_priv))
+               return;
+
+       peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+       hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+               if (hpe->peer_vhca_id == peer_vhca_id)
+                       hpe->hp->pair->peer_gone = true;
+       }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct mlx5e_flow_steering *fs;
+       struct mlx5e_priv *peer_priv;
+       struct mlx5e_tc_table *tc;
+       struct mlx5e_priv *priv;
+
+       if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+           event != NETDEV_UNREGISTER ||
+           ndev->reg_state == NETREG_REGISTERED)
+               return NOTIFY_DONE;
+
+       tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+       fs = container_of(tc, struct mlx5e_flow_steering, tc);
+       priv = container_of(fs, struct mlx5e_priv, fs);
+       peer_priv = netdev_priv(ndev);
+       if (priv == peer_priv ||
+           !(priv->netdev->features & NETIF_F_HW_TC))
+               return NOTIFY_DONE;
+
+       mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+       return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
+       int err;
 
        hash_init(tc->mod_hdr_tbl);
        hash_init(tc->hairpin_tbl);
 
-       return rhashtable_init(&tc->ht, &tc_ht_params);
+       err = rhashtable_init(&tc->ht, &tc_ht_params);
+       if (err)
+               return err;
+
+       tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+       if (register_netdevice_notifier(&tc->netdevice_nb)) {
+               tc->netdevice_nb.notifier_call = NULL;
+               mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+       }
+
+       return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+       if (tc->netdevice_nb.notifier_call)
+               unregister_netdevice_notifier(&tc->netdevice_nb);
+
        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
        if (!IS_ERR_OR_NULL(tc->t)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2db3cc6c90566e22d2187f328a99e..ea7dedc2d5adfc48081387619222c8e07da43bd4 100644 (file)
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
        u32 max_guarantee = 0;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled || evport->info.min_rate < max_guarantee)
                        continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
        int err;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled)
                        continue;
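Both loops above walked one element past the end of the esw->vports array because of the "<=" bound; the fix is the standard zero-based loop. A standalone illustration of why the old bound over-reads (plain C, nothing mlx5-specific):

    #include <stdio.h>

    int main(void)
    {
            int vports[4] = { 1, 2, 3, 4 };
            int total_vports = 4;
            int i, sum = 0;

            /* "i <= total_vports" would also read vports[4], one past the end */
            for (i = 0; i < total_vports; i++)
                    sum += vports[i];

            printf("sum=%d\n", sum);
            return 0;
    }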
index d2f76070ea7ca87bcc98c1382cd95504e0a60104..a1ee9a8a769e8a96e2c25f84454772159bb4bd16 100644 (file)
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
        for (i = 0; i < hp->num_channels; i++) {
                mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+               if (!hp->peer_gone)
+                       mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
        }
 }
 
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
                                       MLX5_RQC_STATE_RST, 0, 0);
 
        /* unset peer SQs */
+       if (hp->peer_gone)
+               return;
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
                                       MLX5_SQC_STATE_RST, 0, 0);
index 8ed38fd5a8520e0e125d96bc2ee7c53d891a5969..c6d29fdbb880f1964847e674dc512d2f0311f0b7 100644 (file)
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
        return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
 
-       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
                continue;
+
+       return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock_bh(&r_vec->lock);
 
-       nfp_ctrl_rx(r_vec);
-
-       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
 }
 
 /* Setup and Configuration
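The control-channel poll above now runs under a fixed budget so a storm of control messages cannot keep the tasklet spinning indefinitely; when the budget runs out the tasklet is rescheduled instead of the interrupt being re-enabled. A simplified, self-contained sketch of that bounded-poll-then-reschedule shape (process_one(), irq_unmask() and reschedule_poll() are stand-in stubs, not the nfp API, and the loop condition is written slightly differently than in the hunk):

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 1000;              /* pretend 1000 messages are queued */

    static bool process_one(void)     { return pending-- > 0; }
    static void irq_unmask(void)      { puts("ring drained, unmask IRQ"); }
    static void reschedule_poll(void) { puts("budget exhausted, poll again"); }

    static unsigned int poll_bounded(unsigned int budget)
    {
            while (budget > 0 && process_one())
                    budget--;
            return budget;          /* nonzero: drained before the budget ran out */
    }

    int main(void)
    {
            if (poll_bounded(512))
                    irq_unmask();
            else
                    reschedule_poll();
            return 0;
    }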
index 69aa7fc392c5e4ad1cbcd9025f56bffdf3aa92c7..59c70be22a84c11262388529cf0ddf09887cea96 100644 (file)
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
                work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_tx_timeout    = netxen_tx_timeout,
        .ndo_fix_features = netxen_fix_features,
        .ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };
 
 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-       int ring;
-       struct nx_host_sds_ring *sds_ring;
-       struct netxen_adapter *adapter = netdev_priv(netdev);
-       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-       disable_irq(adapter->irq);
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netxen_intr(adapter->irq, sds_ring);
-       }
-       enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {
index 9b3ef00e57824a5fd72e7ae06e3d1d44b59b176b..a71382687ef2bedca91adcd50d8c16dcbd1cd3c7 100644 (file)
@@ -11987,6 +11987,7 @@ struct public_global {
        u32 running_bundle_id;
        s32 external_temperature;
        u32 mdump_reason;
+       u64 reserved;
        u32 data_ptr;
        u32 data_size;
 };
index 17f3dfa2cc94084552a6f66cae0044c46af35fec..e860bdf0f7524195afce3607fafe8075f733bd0a 100644 (file)
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
                cm_info->local_ip[0] = ntohl(iph->daddr);
                cm_info->remote_ip[0] = ntohl(iph->saddr);
-               cm_info->ip_version = TCP_IPV4;
+               cm_info->ip_version = QED_TCP_IPV4;
 
                ip_hlen = (iph->ihl) * sizeof(u32);
                *payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
                        cm_info->remote_ip[i] =
                            ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
                }
-               cm_info->ip_version = TCP_IPV6;
+               cm_info->ip_version = QED_TCP_IPV6;
 
                ip_hlen = sizeof(*ip6h);
                *payload_len = ntohs(ip6h->payload_len);
index be941cfaa2d4fdf9f50eedd6467033617bfcdba7..c71391b9c757a1b03f55f21cc641c4718bbce719 100644 (file)
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                                 num_cons, "Toggle");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-                          "Failed to allocate toogle bits, rc = %d\n", rc);
+                          "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }
 
index 7d7a64c55ff1fc2033e4ee273be0b40d10df8e74..f9167d1354bbef3ccf2e972e8c002e64bbc24cce 100644 (file)
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-       enum roce_flavor flavor;
-
        switch (roce_mode) {
        case ROCE_V1:
-               flavor = PLAIN_ROCE;
-               break;
+               return PLAIN_ROCE;
        case ROCE_V2_IPV4:
-               flavor = RROCE_IPV4;
-               break;
+               return RROCE_IPV4;
        case ROCE_V2_IPV6:
-               flavor = ROCE_V2_IPV6;
-               break;
+               return RROCE_IPV6;
        default:
-               flavor = MAX_ROCE_MODE;
-               break;
+               return MAX_ROCE_FLAVOR;
        }
-       return flavor;
 }
 
 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
index 8de644b4721efd63a7d3efa410139228d0b2f739..77b6248ad3b97d3a45caf27825faddabf9695a5b 100644 (file)
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
 {
-       enum tunnel_clss type;
+       int type;
 
        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
index 3d42696598202591794613afebea7ed42d51be6e..be118d057b92c5ad494690b7c80c98140dbb8e7a 100644 (file)
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        }
 
        if (!p_iov->b_pre_fp_hsi &&
-           ETH_HSI_VER_MINOR &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                           struct qed_tunn_update_type *p_src,
-                          enum qed_tunn_clss mask, u8 *p_cls)
+                          enum qed_tunn_mode mask, u8 *p_cls)
 {
        if (p_src->b_update_mode) {
                p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                         struct qed_tunn_update_type *p_src,
-                        enum qed_tunn_clss mask,
+                        enum qed_tunn_mode mask,
                         u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
                         u8 *p_update_port, u16 *p_udp_port)
 {
index 81312924df1407092fd1dd43cc0555d16976160b..0c443ea98479ac0971a6e36c28bd8bde2f080bfa 100644 (file)
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
        int (*config_loopback) (struct qlcnic_adapter *, u8);
        int (*clear_loopback) (struct qlcnic_adapter *, u8);
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+                                u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-                                       u64 *addr, u16 id)
+                                       u64 *addr, u16 vlan,
+                                       struct qlcnic_host_tx_ring *tx_ring)
 {
-       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
index 569d54ededeca2e6472a3f8502e91c45be8e5232..a79d84f9910229515acf900e8286f71b8a010ae1 100644 (file)
@@ -2135,7 +2135,8 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-                                 u16 vlan_id)
+                                 u16 vlan_id,
+                                 struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
index b75a812468569de7728fd9c654b6f1c7e353729f..73fe2f64491de24408d893a3eb91ffb691fe4f03 100644 (file)
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
index 4bb33af8e2b3a956db02847bfebfa6ef2362bb3b..56a3bd9e37dcd773e9d8b1d52366eaa971506fa6 100644 (file)
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-                              u64 *uaddr, u16 vlan_id);
+                              u64 *uaddr, u16 vlan_id,
+                              struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
                                     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
index 84dd83031a1bfcc31c0f8a908fef0c1bb3e7d155..9647578cbe6a8fec82409c4eadf9aee02f6c7971 100644 (file)
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-                              u16 vlan_id)
+                              u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;
 
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
-                                                    vlan_id);
+                                                    vlan_id, tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        if (!fil)
                return;
 
-       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (adapter->drv_mac_learn)
-               qlcnic_send_filter(adapter, first_desc, skb);
+               qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
index 2d38d1ac2aae58fd210030c7b143011f76b921cc..dbd48012224f2467d27134eedc692a68b92b1a04 100644 (file)
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
        .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
        .ndo_features_check     = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
        .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx;
-       struct qlcnic_host_tx_ring *tx_ring;
-       int ring;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               return;
-
-       recv_ctx = adapter->recv_ctx;
-
-       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_sds_intr(adapter, sds_ring);
-               napi_schedule(&sds_ring->napi);
-       }
-
-       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-               /* Only Multi-Tx queue capable devices need to
-                * schedule NAPI for TX rings
-                */
-               if ((qlcnic_83xx_check(adapter) &&
-                    (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-                   (qlcnic_82xx_check(adapter) &&
-                    !qlcnic_check_multi_tx(adapter)))
-                       return;
-
-               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-                       tx_ring = &adapter->tx_ring[ring];
-                       qlcnic_disable_tx_intr(adapter, tx_ring);
-                       napi_schedule(&tx_ring->napi);
-               }
-       }
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
index 7fd86d40a3374df1fba991ece10e6ec48bc197e1..11167abe5934d3a2d2d71f6cb0f7674d665d9d5b 100644 (file)
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
        struct sk_buff *skbn;
 
        if (skb->dev->type == ARPHRD_ETHER) {
-               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        }
 
        if (skb_headroom(skb) < required_headroom) {
-               if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+               if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }
 
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
        if (!skb)
                goto done;
 
+       if (skb->pkt_type == PACKET_LOOPBACK)
+               return RX_HANDLER_PASS;
+
        dev = skb->dev;
        port = rmnet_get_port(dev);
 
index ab30aaeac6d377e6303fafc57f820e3022ae7773..9a5e2969df6197cd3383e263b2336dca5faa1a2d 100644 (file)
@@ -4072,13 +4072,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
        genphy_soft_reset(dev->phydev);
 
-       /* It was reported that chip version 33 ends up with 10MBit/Half on a
+       /* It was reported that several chips end up with 10MBit/Half on a
         * 1GBit link after resuming from S3. For whatever reason the PHY on
-        * this chip doesn't properly start a renegotiation when soft-reset.
+        * these chips doesn't properly start a renegotiation when soft-reset.
         * Explicitly requesting a renegotiation fixes this.
         */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
-           dev->phydev->autoneg == AUTONEG_ENABLE)
+       if (dev->phydev->autoneg == AUTONEG_ENABLE)
                phy_restart_aneg(dev->phydev);
 }
 
@@ -4536,9 +4535,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 
 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-       /* Set DMA burst size and Interframe Gap Time */
-       RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-               (InterFrameGap << TxInterFrameGapShift));
+       u32 val = TX_DMA_BURST << TxDMAShift |
+                 InterFrameGap << TxInterFrameGapShift;
+
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+           tp->mac_version != RTL_GIGA_MAC_VER_39)
+               val |= TXCFG_AUTO_FIFO;
+
+       RTL_W32(tp, TxConfig, val);
 }
 
 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5033,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        /* Adjust EEE LED frequency */
@@ -5067,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5112,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5211,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5295,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
        rtl8168ep_stop_cmac(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5618,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6869,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
 
        rtl8169_net_suspend(dev);
+       clk_disable_unprepare(tp->clk);
 
        return 0;
 }
@@ -6898,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       clk_prepare_enable(tp->clk);
 
        if (netif_running(dev))
                __rtl8169_resume(dev);
index 330233286e785254f5f29c87f9557a305974f606..3d0dd39c289e05b8a7a6778363461ef5698dc62b 100644 (file)
@@ -2206,29 +2206,6 @@ static void efx_fini_napi(struct efx_nic *efx)
                efx_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_channel *channel;
-
-       efx_for_each_channel(channel, efx)
-               efx_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = efx_netpoll,
-#endif
        .ndo_setup_tc           = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
index dd5530a4f8c8936868aed7171bd9481f93730d76..03e2455c502eacd9a4fd5c7fd320a9edcf265f77 100644 (file)
@@ -2052,29 +2052,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
                ef4_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-       struct ef4_nic *efx = netdev_priv(net_dev);
-       struct ef4_channel *channel;
-
-       ef4_for_each_channel(channel, efx)
-               ef4_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
        .ndo_set_mac_address    = ef4_set_mac_address,
        .ndo_set_rx_mode        = ef4_set_rx_mode,
        .ndo_set_features       = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ef4_netpoll,
-#endif
        .ndo_setup_tc           = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = ef4_filter_rfs,
index 23a52b9293f35eaec1d71063305a029ba466d819..cd1d8faccca5fb36b488312d734d5e42cebb7b1a 100644 (file)
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
        struct adf7242_local *lp = spi_get_drvdata(spi);
 
-       if (!IS_ERR_OR_NULL(lp->debugfs_root))
-               debugfs_remove_recursive(lp->debugfs_root);
+       debugfs_remove_recursive(lp->debugfs_root);
 
        cancel_delayed_work_sync(&lp->work);
        destroy_workqueue(lp->wqueue);
index 58299fb666ed4d84fb7ea01a76aabb86575ab939..0ff5a403a8dc356a359fb085be26379ca011b67b 100644 (file)
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
        for (i = 0; i < len; i++)
                dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
 
-       fifo_buffer = kmalloc(len, GFP_KERNEL);
+       fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
        if (!fifo_buffer)
                return -ENOMEM;
-       memcpy(fifo_buffer, buf, len);
        kfifo_in(&test->up_fifo, &fifo_buffer, 4);
        wake_up_interruptible(&priv->test.readq);
 
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
        struct ca8210_test *test = &priv->test;
 
-       if (!IS_ERR(test->ca8210_dfs_spi_int))
-               debugfs_remove(test->ca8210_dfs_spi_int);
+       debugfs_remove(test->ca8210_dfs_spi_int);
        kfifo_free(&test->up_fifo);
        dev_info(&priv->spi->dev, "Test interface removed\n");
 }
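Two small cleanups in the adf7242 and ca8210 hunks above: debugfs_remove() and debugfs_remove_recursive() already accept NULL or error pointers, so the IS_ERR/IS_ERR_OR_NULL guards were redundant, and a kmalloc()+memcpy() pair collapses into kmemdup(). A short sketch of the kmemdup replacement (kernel-style, hypothetical buffer names):

    /* before */
    copy = kmalloc(len, GFP_KERNEL);
    if (!copy)
            return -ENOMEM;
    memcpy(copy, src, len);

    /* after: same semantics, one call */
    copy = kmemdup(src, len, GFP_KERNEL);
    if (!copy)
            return -ENOMEM;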
index e428277781ac4422bec2e8f47fd35476a85a74f7..04891429a55423e4ea4a3f5f5025631c01cda13a 100644 (file)
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
 
        switch (seq_state) {
        /* TX IRQ, RX IRQ and SEQ IRQ */
-       case (0x03):
+       case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        lp->is_tx = 0;
                        dev_dbg(printdev(lp), "TX is done. No ACK\n");
                        mcr20a_handle_tx_complete(lp);
                }
                break;
-       case (0x05):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
                        /* rx is starting */
                        dev_dbg(printdev(lp), "RX is starting\n");
                        mcr20a_handle_rx(lp);
                break;
-       case (0x07):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        /* tx is done */
                        lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
                        mcr20a_handle_rx(lp);
                }
                break;
-       case (0x01):
+       case (DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        dev_dbg(printdev(lp), "TX is starting\n");
                        mcr20a_handle_tx(lp);
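The mcr20a interrupt handler above replaces magic case values (0x01, 0x03, 0x05, 0x07) with ORs of the named DAR_IRQSTS1_* bits, which documents which interrupt sources each branch handles. A standalone illustration of switching on combinations of bit flags (the F_* values are made up for the example, not the MCR20A register layout):

    #include <stdio.h>

    #define F_SEQ 0x01
    #define F_TX  0x02
    #define F_RX  0x04

    static void handle(unsigned int status)
    {
            switch (status) {
            case (F_TX | F_SEQ):            /* 0x03, but now self-describing */
                    puts("tx done");
                    break;
            case (F_RX | F_SEQ):            /* 0x05 */
                    puts("rx started");
                    break;
            case (F_RX | F_TX | F_SEQ):     /* 0x07 */
                    puts("tx done, ack received");
                    break;
            case (F_SEQ):                   /* 0x01 */
                    puts("sequence only");
                    break;
            default:
                    puts("unhandled");
            }
    }

    int main(void)
    {
            handle(F_TX | F_SEQ);
            return 0;
    }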
index db1172db1e7cb7df0fed8a21b0a7757ae6c068b5..19ab8a7d1e4863dc5b0a5208c4b7e674ba2ff6de 100644 (file)
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
        if (!netdev)
                return !phydev->suspended;
 
-       /* Don't suspend PHY if the attached netdev parent may wakeup.
+       if (netdev->wol_enabled)
+               return false;
+
+       /* As long as not all affected network drivers support the
+        * wol_enabled flag, let's check for hints that WoL is enabled.
+        * Don't suspend PHY if the attached netdev parent may wake up.
         * The parent may point to a PCI device, as in tg3 driver.
         */
        if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
                sysfs_remove_link(&dev->dev.kobj, "phydev");
                sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
        }
+       phy_suspend(phydev);
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
-       phy_suspend(phydev);
        phydev->phylink = NULL;
 
        phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+       struct net_device *netdev = phydev->attached_dev;
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        int ret = 0;
 
        /* If the device has WOL enabled, we cannot suspend the PHY */
        phy_ethtool_get_wol(phydev, &wol);
-       if (wol.wolopts)
+       if (wol.wolopts || (netdev && netdev->wol_enabled))
                return -EBUSY;
 
        if (phydev->drv && phydrv->suspend)
index 52fffb98fde9ac3fd05c7f6fd8e5dc123ecae341..6e13b8832bc7df94467211f07c1e7dba15a6e877 100644 (file)
@@ -1098,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
 
 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-       hwmon_device_unregister(sfp->hwmon_dev);
-       kfree(sfp->hwmon_name);
+       if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+               hwmon_device_unregister(sfp->hwmon_dev);
+               sfp->hwmon_dev = NULL;
+               kfree(sfp->hwmon_name);
+       }
 }
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)
index e2648b5a3861e51dc6c40d19e1198a5f3f7ca7af..50e9cc19023a701bad861ac117665a024ba776b1 100644 (file)
@@ -181,6 +181,7 @@ struct tun_file {
        };
        struct napi_struct napi;
        bool napi_enabled;
+       bool napi_frags_enabled;
        struct mutex napi_mutex;        /* Protects access to the above napi */
        struct list_head next;
        struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }
 
 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-                         bool napi_en)
+                         bool napi_en, bool napi_frags)
 {
        tfile->napi_enabled = napi_en;
+       tfile->napi_frags_enabled = napi_en && napi_frags;
        if (napi_en) {
                netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&tfile->napi);
-               mutex_init(&tfile->napi_mutex);
        }
 }
 
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                napi_disable(&tfile->napi);
 }
 
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                netif_napi_del(&tfile->napi);
 }
 
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-       return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+       return tfile->napi_frags_enabled;
 }
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        tun = rtnl_dereference(tfile->tun);
 
        if (tun && clean) {
-               tun_napi_disable(tun, tfile);
-               tun_napi_del(tun, tfile);
+               tun_napi_disable(tfile);
+               tun_napi_del(tfile);
        }
 
        if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               tun_napi_disable(tun, tfile);
+               tun_napi_disable(tfile);
                tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
        synchronize_net();
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               tun_napi_del(tun, tfile);
+               tun_napi_del(tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
                xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi)
+                     bool skip_filter, bool napi, bool napi_frags)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
                tun_enable_queue(tfile);
        } else {
                sock_hold(&tfile->sk);
-               tun_napi_init(tun, tfile, napi);
+               tun_napi_init(tun, tfile, napi, napi_frags);
        }
 
        tun_set_real_num_queues(tun);
@@ -1709,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int err;
        u32 rxhash = 0;
        int skb_xdp = 1;
-       bool frags = tun_napi_frags_enabled(tun);
+       bool frags = tun_napi_frags_enabled(tfile);
 
        if (!(tun->dev->flags & IFF_UP))
                return -EIO;
@@ -2534,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        return err;
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-                                ifr->ifr_flags & IFF_NAPI);
+                                ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        return err;
 
@@ -2632,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                              (ifr->ifr_flags & TUN_FEATURES);
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        goto err_free_flow;
 
@@ -2781,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+                                tun->flags & IFF_NAPI_FRAGS);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3199,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                return -ENOMEM;
        }
 
+       mutex_init(&tfile->napi_mutex);
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
        tfile->ifindex = 0;
index e95dd12edec473198125c18c1cec6bc7d32ec368..023b8d0bf1754e833e08514b9cf6165ce3240984 100644 (file)
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 9e8ad372f4190eed1d4e92891193d325c44fb47f..2207f7a7d1ffbb3fe6c4fefa101c4bb2ae01384e 100644 (file)
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_MODE_RWLC;
        if (wolinfo->wolopts & WAKE_MAGIC)
index a9991c5f4736b6dd1e395527fbfeeecc3d0ab303..c3c9ba44e2a12a038e012a3374977b5a6189e3f1 100644 (file)
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
        if (ret < 0)
                return ret;
 
-       pdata->wol = 0;
-       if (wol->wolopts & WAKE_UCAST)
-               pdata->wol |= WAKE_UCAST;
-       if (wol->wolopts & WAKE_MCAST)
-               pdata->wol |= WAKE_MCAST;
-       if (wol->wolopts & WAKE_BCAST)
-               pdata->wol |= WAKE_BCAST;
-       if (wol->wolopts & WAKE_MAGIC)
-               pdata->wol |= WAKE_MAGIC;
-       if (wol->wolopts & WAKE_PHY)
-               pdata->wol |= WAKE_PHY;
-       if (wol->wolopts & WAKE_ARP)
-               pdata->wol |= WAKE_ARP;
+       if (wol->wolopts & ~WAKE_ALL)
+               return -EINVAL;
+
+       pdata->wol = wol->wolopts;
 
        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
index 2cd71bdb6484c774659598fff1e99cd49181337b..f1b5201cc32075da27cf14d94b781c9f58c16189 100644 (file)
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (!rtl_can_wakeup(tp))
                return -EOPNOTSUPP;
 
+       if (wol->wolopts & ~WAKE_ANY)
+               return -EINVAL;
+
        ret = usb_autopm_get_interface(tp->intf);
        if (ret < 0)
                goto out_set_wol;
index 05553d2524469f97e4a02bb48f43f6820ad2b3e5..e5a4cbb366dc7b616fda3ee1b70d192b001ed3c1 100644 (file)
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
index 06b4d290784dad95f893b63da62d26e020fc060a..262e7a3c23cb67fbfd66b81ed0d26af0f0480d84 100644 (file)
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
index 9277a0f228dfa6de355c74d2652edcf2fb1d2f4b..35f39f23d88144195b8f007035f207d38b48c1fd 100644 (file)
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= SR_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
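All of the usbnet set_wol hunks above (asix, ax88179, lan78xx, r8152, smsc75xx, smsc95xx, sr9800) follow the same pattern: reject any requested wake option the device cannot honour instead of silently ignoring it, then program only the supported bits. A minimal standalone sketch of that validate-then-apply shape (the WAKE_* values here are made up to keep it self-contained; the drivers use the flags from <linux/ethtool.h>):

    #include <stdio.h>

    #define WAKE_PHY   0x01
    #define WAKE_MAGIC 0x20
    #define SUPPORTED_WAKE (WAKE_PHY | WAKE_MAGIC)

    static int set_wol(unsigned int wolopts)
    {
            if (wolopts & ~SUPPORTED_WAKE)
                    return -1;              /* -EINVAL in the drivers above */

            /* program only bits the hardware understands */
            printf("enabling wake sources 0x%x\n", wolopts & SUPPORTED_WAKE);
            return 0;
    }

    int main(void)
    {
            printf("%d\n", set_wol(WAKE_MAGIC));    /* accepted */
            printf("%d\n", set_wol(0x100));         /* rejected */
            return 0;
    }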
index 76592090522607a4bdf5422b1d49ec99c6fd68ac..dab504ec5e502be401cbfe9a8e3f0f572c0220ba 100644 (file)
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = virtnet_netpoll,
-#endif
        .ndo_bpf                = virtnet_xdp,
        .ndo_xdp_xmit           = virtnet_xdp_xmit,
        .ndo_features_check     = passthru_features_check,
index ababba37d735d62b7fe0500983f411d7806baa17..2b8da2b7e721e33f0683efa61e50ceac68d256e7 100644 (file)
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL_INHERIT */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        }
 
        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+                      !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
index 094cea775d0c0bd3090102cf5d511d08d718fef7..ef298d8525c5481c8df55f933cf0df6452ba41c0 100644 (file)
@@ -257,7 +257,7 @@ static const struct
        [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
        [I2400M_MS_BUSY] = { "busy", -EBUSY },
        [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
-       [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+       [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
        [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
        [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
        [I2400M_MS_NO_RF] = { "no RF", -EIO },
index 6b0e1ec346cb60aacd8076600033cf9ee554c462..d46d57b989aec0d1fa869128b62022b7be401892 100644 (file)
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        }
                } else {
                        /* More than a single header/data pair were missed.
-                        * Report this error, and reset the controller to
+                        * Report this error. If running with open-source
+                        * firmware, then reset the controller to
                         * revive operation.
                         */
                        b43dbg(dev->wl,
                               "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
                               ring->index, firstused, slot);
-                       b43_controller_restart(dev, "Out of order TX");
+                       if (dev->fw.opensource)
+                               b43_controller_restart(dev, "Out of order TX");
                        return;
                }
        }
index 5916879849621dc079a1cb0cd61848201057527d..497fd766d87c83e0bcda39ff47b9a37177f79b09 100644 (file)
@@ -51,6 +51,7 @@
 
 static const struct iwl_base_params iwl1000_base_params = {
        .num_of_queues = IWLAGN_NUM_QUEUES,
+       .max_tfd_queue_size = 256,
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .pll_cfg = true,
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
index 1068757ec42e4784942e69c00161ae4e1ee16548..07442ada6dd0e419bf4f29fabd3cb67186e47650 100644 (file)
@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
-       struct work_struct destroy_work;
        u32 portid;
        char alpha2[2];
        const struct ieee80211_regdomain *regd;
@@ -2935,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        hwsim_radios_generation++;
        spin_unlock_bh(&hwsim_radio_lock);
 
-       if (idx > 0)
-               hwsim_mcast_new_radio(idx, info, param);
+       hwsim_mcast_new_radio(idx, info, param);
 
        return idx;
 
@@ -3565,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
        .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
 };
 
-static void destroy_radio(struct work_struct *work)
-{
-       struct mac80211_hwsim_data *data =
-               container_of(work, struct mac80211_hwsim_data, destroy_work);
-
-       hwsim_radios_generation++;
-       mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
 static void remove_user_radios(u32 portid)
 {
        struct mac80211_hwsim_data *entry, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
                if (entry->destroy_on_close && entry->portid == portid) {
-                       list_del(&entry->list);
+                       list_move(&entry->list, &list);
                        rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
                                               hwsim_rht_params);
-                       INIT_WORK(&entry->destroy_work, destroy_radio);
-                       queue_work(hwsim_wq, &entry->destroy_work);
+                       hwsim_radios_generation++;
                }
        }
        spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(entry, tmp, &list, list) {
+               list_del(&entry->list);
+               mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+                                        NULL);
+       }
 }
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3646,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
 static void __net_exit hwsim_exit_net(struct net *net)
 {
        struct mac80211_hwsim_data *data, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3656,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
                if (data->netgroup == hwsim_net_get_netgroup(&init_net))
                        continue;
 
-               list_del(&data->list);
+               list_move(&data->list, &list);
                rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
                                       hwsim_rht_params);
                hwsim_radios_generation++;
-               spin_unlock_bh(&hwsim_radio_lock);
+       }
+       spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(data, tmp, &list, list) {
+               list_del(&data->list);
                mac80211_hwsim_del_radio(data,
                                         wiphy_name(data->hw->wiphy),
                                         NULL);
-               spin_lock_bh(&hwsim_radio_lock);
        }
-       spin_unlock_bh(&hwsim_radio_lock);
 
        ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
 }
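The hwsim hunks above replace both the per-radio destroy work item and the delete-while-locked path with one idiom: entries are moved onto a local list while hwsim_radio_lock is held, and the actual mac80211_hwsim_del_radio() calls run after the lock is dropped. A hedged kernel-style sketch of that splice-out-then-process pattern (hypothetical entry type and destroy() helper; the list and spinlock primitives are the real <linux/list.h> and <linux/spinlock.h> ones):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct entry {
            struct list_head list;
            bool dead;
    };

    static LIST_HEAD(table);
    static DEFINE_SPINLOCK(table_lock);

    static void destroy(struct entry *e);   /* heavy teardown, may sleep */

    static void reap_dead_entries(void)
    {
            struct entry *e, *tmp;
            LIST_HEAD(reaplist);

            spin_lock_bh(&table_lock);
            list_for_each_entry_safe(e, tmp, &table, list) {
                    if (e->dead)
                            list_move(&e->list, &reaplist); /* unlink now */
            }
            spin_unlock_bh(&table_lock);

            /* the expensive teardown runs without the spinlock held */
            list_for_each_entry_safe(e, tmp, &reaplist, list) {
                    list_del(&e->list);
                    destroy(e);
            }
    }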
index cf6ffb1ba4a290e1561374c68af9b947215af0ea..22bc9d368728624ef70a0f009d38e8925380c872 100644 (file)
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
 {
        struct mt76x0_dev *dev = hw->priv;
        struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       unsigned int wcid = mvif->group_wcid.idx;
 
-       dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+       dev->vif_mask &= ~BIT(mvif->idx);
 }
 
 static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
index a46a1e94505d01d782efea328945d91252449981..936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6 100644 (file)
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
        unsigned int alg;
        u32 flags;
+       bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-       u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+       u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
 };
index 3c4c58b9fe76edfbf3d27fb5b6dbd0184ba706c0..0ccb021f1e78687d7c7a9814a05369aafe7c6508 100644 (file)
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
        vif->hash.size = size;
-       memset(vif->hash.mapping, 0, sizeof(u32) * size);
+       memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+              sizeof(u32) * size);
 
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
 {
-       u32 *mapping = &vif->hash.mapping[off];
-       struct gnttab_copy copy_op = {
+       u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+       unsigned int nr = 1;
+       struct gnttab_copy copy_op[2] = {{
                .source.u.ref = gref,
                .source.domid = vif->domid,
-               .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
-               .dest.offset = xen_offset_in_page(mapping),
-               .len = len * sizeof(u32),
+               .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
-       };
+       }};
 
-       if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
-       while (len-- != 0)
-               if (mapping[off++] >= vif->num_queues)
-                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+       copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+       if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+               copy_op[1] = copy_op[0];
+               copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+               copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+               copy_op[1].dest.offset = 0;
+               copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+               copy_op[0].len = copy_op[1].source.offset;
+               nr = 2;
+       }
 
-       if (copy_op.len != 0) {
-               gnttab_batch_copy(&copy_op, 1);
+       memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+              vif->hash.size * sizeof(*mapping));
 
-               if (copy_op.status != GNTST_okay)
+       if (copy_op[0].len != 0) {
+               gnttab_batch_copy(copy_op, nr);
+
+               if (copy_op[0].status != GNTST_okay ||
+                   copy_op[nr - 1].status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }
 
+       while (len-- != 0)
+               if (mapping[off++] >= vif->num_queues)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
        }
 
        if (vif->hash.size != 0) {
+               const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
                seq_puts(m, "\nHash Mapping:\n");
 
                for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
                        seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
                        for (j = 0; j < n; j++, i++)
-                               seq_printf(m, "%4u ", vif->hash.mapping[i]);
+                               seq_printf(m, "%4u ", mapping[i]);
 
                        seq_puts(m, "\n");
                }
index 92274c2372008a57ba12ca960bafa84cd2eac7b3..f6ae23fc3f6b086e60149befd9a3ca9500a48bf1 100644 (file)
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+       return vif->hash.mapping[vif->hash.mapping_sel]
+                               [skb_get_hash_raw(skb) % size];
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
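The xen-netback change keeps two copies of the hash mapping table plus an index (mapping_sel) saying which copy is live: a control-plane update is grant-copied and validated into the inactive copy, and only then does mapping_sel flip, so the data path (xenvif_select_queue above) never reads a half-written table. A standalone sketch of that double-buffer-and-flip pattern (plain C, single writer assumed, which the real code also relies on):

    #include <stdio.h>
    #include <string.h>

    #define MAP_SIZE 8

    static unsigned int mapping[2][MAP_SIZE];
    static unsigned int sel;                /* which copy readers use */

    static unsigned int select_queue(unsigned int hash)
    {
            return mapping[sel][hash % MAP_SIZE];   /* always a complete table */
    }

    static void update_mapping(const unsigned int *new_map)
    {
            unsigned int next = !sel;

            memcpy(mapping[next], new_map, sizeof(mapping[next]));
            /* validate mapping[next] here before publishing it */
            sel = next;                     /* flip once the copy is consistent */
    }

    int main(void)
    {
            unsigned int m[MAP_SIZE] = { 0, 1, 0, 1, 0, 1, 0, 1 };

            update_mapping(m);
            printf("queue=%u\n", select_queue(5));
            return 0;
    }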
index de8282420f966f0d0f984c72868d9ebddcf11207..ffce6f39828aa1799c49975df1d85fe2d8ccbe38 100644 (file)
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
                struct qeth_card *card)
 {
-       char *ipa_name;
+       const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
index 5bcb8dafc3ee506f9ff3487bdf8e01cf274e35b2..e891c0b52f4ccc79995b6c60a8a05e6e12f92005 100644 (file)
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
 
 struct ipa_rc_msg {
        enum qeth_ipa_return_codes rc;
-       char *msg;
+       const char *msg;
 };
 
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
        {IPA_RC_SUCCESS,                "success"},
        {IPA_RC_NOTSUPP,                "Command not supported"},
        {IPA_RC_IP_TABLE_FULL,          "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
 
 
 
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
 {
-       int x = 0;
-       qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
-                       sizeof(struct ipa_rc_msg) - 1].rc = rc;
-       while (qeth_ipa_rc_msg[x].rc != rc)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+               if (qeth_ipa_rc_msg[x].rc == rc)
+                       return qeth_ipa_rc_msg[x].msg;
        return qeth_ipa_rc_msg[x].msg;
 }
 
 
 struct ipa_cmd_names {
        enum qeth_ipa_cmds cmd;
-       char *name;
+       const char *name;
 };
 
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_STARTLAN,      "startlan"},
        {IPA_CMD_STOPLAN,       "stoplan"},
        {IPA_CMD_SETVMAC,       "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_UNKNOWN,       "unknown"},
 };
 
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
 {
-       int x = 0;
-       qeth_ipa_cmd_names[
-               sizeof(qeth_ipa_cmd_names) /
-                       sizeof(struct ipa_cmd_names)-1].cmd = cmd;
-       while (qeth_ipa_cmd_names[x].cmd != cmd)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+               if (qeth_ipa_cmd_names[x].cmd == cmd)
+                       return qeth_ipa_cmd_names[x].name;
        return qeth_ipa_cmd_names[x].name;
 }
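
The lookup rewrites above drop the old trick of writing the searched value into the table's last entry (which forced an unbounded while loop and kept the arrays writable) in favour of a bounded scan that falls back to the final catch-all entry; that is what allows the tables and returned strings to become const. A minimal standalone sketch of the same bounded-lookup pattern, with made-up table contents:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct code_msg {
        int code;
        const char *msg;
};

static const struct code_msg table[] = {
        { 0,  "success" },
        { 1,  "not supported" },
        { -1, "unknown" },      /* catch-all entry, must stay last */
};

/* Scan every entry except the catch-all, then fall back to it. */
static const char *lookup_msg(int code)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(table) - 1; i++)
                if (table[i].code == code)
                        return table[i].msg;
        return table[i].msg;
}

int main(void)
{
        printf("%s\n", lookup_msg(1));  /* "not supported" */
        printf("%s\n", lookup_msg(42)); /* "unknown" */
        return 0;
}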
index aa8b9196b089e0c9b2788493d5103dd5b38ad33d..aa5de1fe01e10068b8913d814c27a9a63bdc95d9 100644 (file)
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
        QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
 };
 
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 
 #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
                               sizeof(struct qeth_ipacmd_setassparms_hdr))
index ecb22749df0bfa4a4fc0596f8eb32a9b693c5004..8cc0151830433230e8f629c2660eec90871b7e8c 100644 (file)
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
 {
        unsigned long addr;
 
+       if (!p)
+               return -ENODEV;
+
        addr = gen_pool_alloc(p, cnt);
        if (!addr)
                return -ENOMEM;
index c646d871386130d5dde7df2619f3b7fa7bcf5af6..681f7d4b7724fd2037257fab596fc13fc3bbc7d5 100644 (file)
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
 {
        u32 shift;
 
-       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
        shift -= tdm_num * 2;
 
        return shift;
index 3946649b85c8908f4e9874b837ccdad274af8499..ba906876cc454f5e67865ad7af69ee3b37f5f059 100644 (file)
@@ -42,6 +42,7 @@ struct bmp_dib_header {
        u32 colors_important;
 } __packed;
 
+static bool use_bgrt = true;
 static bool request_mem_succeeded = false;
 static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
        void *bgrt_image = NULL;
        u8 *dst = info->screen_base;
 
+       if (!use_bgrt)
+               return;
+
        if (!bgrt_tab.image_address) {
                pr_info("efifb: No BGRT, not showing boot graphics\n");
                return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
                                mem_flags &= ~EFI_MEMORY_WC;
+                       else if (!strcmp(this_opt, "nobgrt"))
+                               use_bgrt = false;
                }
        }
 
index ef69273074ba706752b52076b83e2cd070210fb2..a3edb20ea4c36094104e1cc45bfd30c976b0b41e 100644 (file)
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
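
The added 4096 caps on mr->w and mr->h matter because the following check multiplies them: without a cap, w * h * 3 can overflow and slip past the buffer_size comparison (and the copy_to_user change likewise stops copying more than was actually read). A standalone illustration of that overflow failure mode, simplified to plain unsigned arithmetic rather than the driver's actual types:

#include <stdio.h>

int main(void)
{
        unsigned int w = 65536, h = 65536, buffer_size = 100;

        /* 65536 * 65536 * 3 wraps to 0 in 32-bit unsigned arithmetic,
         * so the size check below passes even though the request is huge.
         */
        if (w * h * 3 > buffer_size)
                printf("rejected\n");
        else
                printf("accepted despite being far too large\n");

        return 0;
}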
index def3a501acd64484342f2315c9b32fe697b166a5..d059d04c63acd7bc118bbc0ec02fc3cb1dfa41f8 100644 (file)
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * enable controller clock
         */
-       clk_enable(fbi->clk);
+       clk_prepare_enable(fbi->clk);
 
        pxa168fb_set_par(info);
 
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_cmap:
        fb_dealloc_cmap(&info->cmap);
 failed_free_clk:
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
        dma_free_coherent(fbi->dev, info->fix.smem_len,
                        info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
        dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
                    info->screen_base, info->fix.smem_start);
 
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 
        framebuffer_release(info);
 
index 045e8afe398be35866adb64d774c0ebf0dd9834c..9e88e3f594c29c4d4a0c7362500b494fbf0ca2db 100644 (file)
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                        dev_name);
                   goto out_err0;
                }
-               /* fall though */
+               /* fall through */
        case S9000_ID_ARTIST:
        case S9000_ID_HCRX:
        case S9000_ID_TIMBER:
index bbd1e357c23df64b385f4baf119e649342c9c50e..f4fd2e72add4ebd512d4e3e499c0c07999c455df 100644 (file)
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
        },
 };
 
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
 {
+       platform_device_unregister(dummy);
+       dummy = NULL;
+
+       kfree(dummy_data);
+       dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
+{
+       /*
+        * Prepare a dummy platform data structure to carry the module
+        * parameters. If mem_size isn't set, then there are no module
+        * parameters, and we can skip this.
+        */
        if (!mem_size)
                return;
 
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
        if (IS_ERR(dummy)) {
                pr_info("could not create platform device: %ld\n",
                        PTR_ERR(dummy));
+               dummy = NULL;
+               ramoops_unregister_dummy();
        }
 }
 
 static int __init ramoops_init(void)
 {
+       int ret;
+
        ramoops_register_dummy();
-       return platform_driver_register(&ramoops_driver);
+       ret = platform_driver_register(&ramoops_driver);
+       if (ret != 0)
+               ramoops_unregister_dummy();
+
+       return ret;
 }
 late_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
        platform_driver_unregister(&ramoops_driver);
-       platform_device_unregister(dummy);
-       kfree(dummy_data);
+       ramoops_unregister_dummy();
 }
 module_exit(ramoops_exit);
 
index daa732550088957538842fc34f8d84228ccbde33..0d6a6a4af8616dcdc484a6f0a417167460adfa64 100644 (file)
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
        int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-       if (inode->i_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_ACCESS);
-               if (err)
-                       return err;
-       }
-       if (inode->i_default_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_DEFAULT);
-               if (err)
-                       return err;
+       if (IS_POSIXACL(inode)) {
+               if (inode->i_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_ACCESS);
+                       if (err)
+                               return err;
+               }
+               if (inode->i_default_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_DEFAULT);
+                       if (err)
+                               return err;
+               }
        }
 #endif
 
index 83a33a1873a6823be1a033dec3f0743a08732f88..7f5ca2cd3a32f7438f3f1ab39ad47422a701b53b 100644 (file)
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
 
        u32 *rqn;
        u32 *sqn;
+
+       bool peer_gone;
 };
 
 struct mlx5_hairpin *
index ca5ab98053c8d48312d9f479a861c6c426b33e54..c7861e4b402c131cfb548f7d0ed863c4ec3ee3e5 100644 (file)
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
  *                     switch driver and used to set the phys state of the
  *                     switch port.
  *
+ *     @wol_enabled:   Wake-on-LAN is enabled
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -2014,6 +2016,7 @@ struct net_device {
        struct lock_class_key   *qdisc_tx_busylock;
        struct lock_class_key   *qdisc_running_key;
        bool                    proto_down;
+       unsigned                wol_enabled:1;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
index 07efffd0c759d0b509dec19acb6b718cbab06031..bbe99d2b28b4c62063450b7c4dadc4013c377897 100644 (file)
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                break;
        case NFPROTO_ARP:
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
+               if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+                       break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
 #endif
                break;
index ea73fef8bdc021b48e68b4b4ce8bb7fe7fc44b57..8586cfb498286ce4399487f4f4627ff2a0b16e9f 100644 (file)
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event' s wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantee that
+ *                 the add and del event callbacks are orderly called
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
 
        /* Events */
        wait_queue_head_t       wait;
+       struct mutex            subscribe_lock;
        struct list_head        subscribed;
        struct list_head        available;
        unsigned int            navailable;
index a2d058170ea3c38739263570bcf14f2a0935e16f..b46d68acf7011f39d86eb65ad2d9886f650f144e 100644 (file)
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-struct netdev_notify_work {
-       struct delayed_work     work;
-       struct net_device       *dev;
-       struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
 #endif
+       struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
 };
index 8ebabc9873d1593b46161697b53c8a8d14242000..4de121e24ce58fa6e19aa24ffec8cf2c79483132 100644 (file)
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
  * @freq: the freqency(in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- *     irrelevant). This can be used later for deduplication.
  * @rule: pointer to store the wmm rule from the regulatory db.
  *
  * Self-managed wireless drivers can use this function to  query
index e03b93360f332b3e3232873ac1cbd0ee7478fabb..a80fd0ac4563283246f4f53cea1ac0cd17b41dab 100644 (file)
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
-       return rcu_dereference_check(ireq->ireq_opt,
-                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index 0c154f98e987367256a40a1154eeb5ea8923a730..39e1d875d507780a08d5cb8b939083d9e58f1e22 100644 (file)
  *   nla_find()                                find attribute in stream of attributes
  *   nla_find_nested()                 find attribute in nested attributes
  *   nla_parse()                       parse and validate stream of attrs
- *   nla_parse_nested()                        parse nested attribuets
+ *   nla_parse_nested()                        parse nested attributes
  *   nla_for_each_attr()               loop over all attributes
  *   nla_for_each_nested()             loop over the nested attributes
  *=========================================================================
index 196587b8f204de13da0529c3cce46b68df75b4ac..837393fa897bb764264741ec2051f163841f0a4d 100644 (file)
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
        rxrpc_peer_new,
        rxrpc_peer_processing,
        rxrpc_peer_put,
-       rxrpc_peer_queued_error,
 };
 
 enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_peer_got,                      "GOT") \
        EM(rxrpc_peer_new,                      "NEW") \
        EM(rxrpc_peer_processing,               "PRO") \
-       EM(rxrpc_peer_put,                      "PUT") \
-       E_(rxrpc_peer_queued_error,             "QER")
+       E_(rxrpc_peer_put,                      "PUT")
 
 #define rxrpc_conn_traces \
        EM(rxrpc_conn_got,                      "GOT") \
index 3bdc8f3ca259ed2d82bb9861033814d65591a51c..ccce954f814682a40ba5d8af0ab463d5b0bfda3b 100644 (file)
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* LE address type */
        addr_type = le_addr_type(cp->addr.type);
 
-       hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
-       err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+       /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+       err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto done;
        }
 
-       /* Abort any ongoing SMP pairing */
-       smp_cancel_pairing(conn);
 
        /* Defer clearing up the connection parameters until closing to
         * give a chance of keeping them if a repairing happens.
index 3a7b0773536b8e226546ebe417463fcba5c92ba4..73f7211d0431a0f766dfacd488077a081054b67c 100644 (file)
@@ -2422,30 +2422,51 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
        return ret;
 }
 
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type)
 {
-       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_conn *hcon;
+       struct l2cap_conn *conn;
        struct l2cap_chan *chan;
        struct smp_chan *smp;
+       int err;
+
+       err = hci_remove_ltk(hdev, bdaddr, addr_type);
+       hci_remove_irk(hdev, bdaddr, addr_type);
+
+       hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+       if (!hcon)
+               goto done;
 
+       conn = hcon->l2cap_data;
        if (!conn)
-               return;
+               goto done;
 
        chan = conn->smp;
        if (!chan)
-               return;
+               goto done;
 
        l2cap_chan_lock(chan);
 
        smp = chan->data;
        if (smp) {
+               /* Set keys to NULL to make sure smp_failure() does not try to
+                * remove and free already invalidated rcu list entries. */
+               smp->ltk = NULL;
+               smp->slave_ltk = NULL;
+               smp->remote_irk = NULL;
+
                if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
                        smp_failure(conn, 0);
                else
                        smp_failure(conn, SMP_UNSPECIFIED);
+               err = 0;
        }
 
        l2cap_chan_unlock(chan);
+
+done:
+       return err;
 }
 
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
index 0ff6247eaa6c0e8c19223c014d11a98d8adaee8a..121edadd5f8da8761c7ef464c22e6455a25d2d27 100644 (file)
@@ -181,7 +181,8 @@ enum smp_key_pref {
 };
 
 /* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
                             enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index 6e0dc6bcd32af7e056e3006fdc0a483aaf4771d1..37278dc280eb3540b24ec22502f01bea2c124199 100644 (file)
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
-       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+           !netif_is_l3_master(skb->dev)) {
                state->okfn(state->net, state->sk, skb);
                return NF_STOLEN;
        }
index 234a0ec2e9327727e95a76f44a29e8cc93feff84..0762aaf8e964ec4c517984fdff8ddfdc4afef99e 100644 (file)
@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_wolinfo wol;
+       int ret;
 
        if (!dev->ethtool_ops->set_wol)
                return -EOPNOTSUPP;
@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
        if (copy_from_user(&wol, useraddr, sizeof(wol)))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_wol(dev, &wol);
+       ret = dev->ethtool_ops->set_wol(dev, &wol);
+       if (ret)
+               return ret;
+
+       dev->wol_enabled = !!wol.wolopts;
+
+       return 0;
 }
 
 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
index 3219a2932463096566ce8ff336ecdf699422dd65..de1d1ba92f2de39292987e1408db0c2b821c4b6d 100644 (file)
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
        }
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-       int work = 0;
-
-       /* net_rx_action's ->poll() invocations and our's are
-        * synchronized by this test which is only made while
-        * holding the napi->poll_lock.
-        */
-       if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-               return;
+       int work;
 
        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;
 
+       rcu_read_lock_bh();
        lockdep_assert_irqs_disabled();
 
        npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
index 63ce2283a456a4aeabf3c10b681fe63195209101..448703312fed0f736bc7f0d58e7bda705a2344f2 100644 (file)
@@ -1898,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                if (tb[IFLA_IF_NETNSID]) {
                        netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
                        tgt_net = get_target_net(skb->sk, netnsid);
-                       if (IS_ERR(tgt_net)) {
-                               tgt_net = net;
-                               netnsid = -1;
-                       }
+                       if (IS_ERR(tgt_net))
+                               return PTR_ERR(tgt_net);
                }
 
                if (tb[IFLA_EXT_MASK])
@@ -2837,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
        else if (ops->get_num_rx_queues)
                num_rx_queues = ops->get_num_rx_queues();
 
+       if (num_tx_queues < 1 || num_tx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
+       if (num_rx_queues < 1 || num_rx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
        dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
                               ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
index d28d46bff6ab43441f34284ec975c1e052a774d0..85d6c879383da8994c6b20cd1e49e0f667a07482 100644 (file)
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (sk->sk_state == DCCP_LISTEN) {
                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
                        if (!acceptable)
                                return 1;
                        consume_skb(skb);
index b08feb219b44b67eadf408a33649d8c7ec9db2d0..8e08cea6f17866b5fb1619f570de747c6a837cbd 100644 (file)
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 
                dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
                                                              ireq->ir_rmt_addr);
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index dfd5009f96ef7111a593651a48f73c4a92c3ed15..15e7f7915a21e0fbce09d5d2c17d877eae499e03 100644 (file)
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
        struct ip_options_rcu *opt;
        struct rtable *rt;
 
-       opt = ireq_opt_deref(ireq);
+       rcu_read_lock();
+       opt = rcu_dereference(ireq->ireq_opt);
 
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
+       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
+       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
index c0fe5ad996f238091f5b9585adb586a571f653f0..26c36cccabdc2c8cc95cfd609672d412c493fc42 100644 (file)
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
-       const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports;
        int end;
 
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = iph->daddr;
+       sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
        sin.sin_port = ports[1];
        memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
index b92f422f2fa805cd5cca8264fe9ae5aa6d6a65b8..891ed2f91467b9345743682a3dd6e818acb48fbd 100644 (file)
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "tcp_probe_interval",
                .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(u32),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec_minmax,
+               .extra2         = &u32_max_div_HZ,
        },
        {
                .procname       = "igmp_link_local_mcast_reports",
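
The tcp_probe_interval entry above switches to proc_douintvec_minmax with an upper bound of UINT_MAX / HZ, presumably so that converting the configured interval from seconds to jiffies (a multiplication by HZ elsewhere in the stack) cannot wrap a 32-bit value. A standalone sketch of the arithmetic being guarded, with an illustrative HZ and assuming a 32-bit unsigned int:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        const unsigned int HZ = 1000;                /* illustrative tick rate */
        const unsigned int max_secs = UINT_MAX / HZ; /* largest value that survives * HZ */

        printf("limit            : %u\n", max_secs);
        printf("limit * HZ       : %u (no wrap)\n", max_secs * HZ);
        printf("(limit + 1) * HZ : %u (wrapped)\n", (max_secs + 1) * HZ);
        return 0;
}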
index 4cf2f7bb2802ad4ae968b5a6dfb9d005ed619c76..47e08c1b5bc3e14e6ae2851b7ec8de91a3eb4a35 100644 (file)
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        if (th->fin)
                                goto discard;
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
 
                        if (!acceptable)
                                return 1;
index 44c09eddbb781c03da2417aaa925e360de01a6e9..cd426313a29819b34648086b551fe9390d8a0b0a 100644 (file)
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index bcfc00e88756dabb1f491d3d41137ccbc7ab1cbc..f8de2482a52923709ed58c401785a6ac60771932 100644 (file)
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return 0;
        }
 
index 3d36644890bb6d3b0a755c811c60e920ad5cd8b8..1ad2c2c4e250f84b1ad73020c727ea8b68b3e0d3 100644 (file)
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 826b14de7dbbc8d1e2100820374654e5722c32b6..a366c05a239da50e98ced776b66d34f923900701 100644 (file)
@@ -4321,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
        if (!nh)
                return -ENOMEM;
        nh->fib6_info = rt;
-       err = ip6_convert_metrics(net, rt, r_cfg);
-       if (err) {
-               kfree(nh);
-               return err;
-       }
        memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
        list_add_tail(&nh->next, rt6_nh_list);
 
index 841f4a07438e83502eadd6ec6c16a16d1de6aa55..9ef490dddcea23b82bd703217bfdde49dce41069 100644 (file)
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return -1;
        }
 
index 9ad07a91708ef7a1008d469766ab39b9b882883f..3c29da5defe6c357ff04ca4adead1a9fee208f08 100644 (file)
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 5959ce9620eb92ece2830d6a59ed21d562a3a1cf..6a74080005cf6acf15fa59d6d3dd14cbf01a1781 100644 (file)
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        }
 
index 5e6cf2cee965264dd45cda775b370b6dcb022413..5836ddeac9e34ecd2aa6e51363679d2cd11f266d 100644 (file)
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                if (local->ops->wake_tx_queue &&
                    type != NL80211_IFTYPE_AP_VLAN &&
-                   type != NL80211_IFTYPE_MONITOR)
+                   (type != NL80211_IFTYPE_MONITOR ||
+                    (params->flags & MONITOR_FLAG_ACTIVE)))
                        txq_size += sizeof(struct txq_info) +
                                    local->hw.txq_data_size;
 
index ee56f18cad3f7e89e1c60fe4829dab7bfa1ef340..21526630bf6559fed1ecd1894a4796db5216fd56 100644 (file)
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-                             struct sta_info *sta, struct sk_buff *skb);
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
index daf9db3c8f24f389df84d95ae973c969d65622f1..6950cd0bf5940a0bc76ea0f3bc283c4a1cac7963 100644 (file)
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-               struct sta_info *sta, struct sk_buff *skb)
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st)
 {
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *txinfo = st->info;
        int failed;
 
-       if (!ieee80211_is_data(hdr->frame_control))
-               return;
-
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100.
index 9a6d7208bf4f809b8cb78856e688c19730ee097e..91d7c0cd18824042044a861cfc1bcb4308c803de 100644 (file)
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
        if (!skb)
                return;
 
-       if (dropped) {
-               dev_kfree_skb_any(skb);
-               return;
-       }
-
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                }
                rcu_read_unlock();
 
+               dev_kfree_skb_any(skb);
+       } else if (dropped) {
                dev_kfree_skb_any(skb);
        } else {
                /* consumes skb */
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 
                rate_control_tx_status(local, sband, status);
                if (ieee80211_vif_is_mesh(&sta->sdata->vif))
-                       ieee80211s_update_metric(local, sta, skb);
+                       ieee80211s_update_metric(local, sta, status);
 
                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                }
 
                rate_control_tx_status(local, sband, status);
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+                       ieee80211s_update_metric(local, sta, status);
        }
 
        if (acked || noack_success) {
index 5cd5e6e5834efc820c94d299dadfd9164e217253..6c647f425e057d6d3c56acc3ceeb108868541967 100644 (file)
@@ -16,6 +16,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 #include "rate.h"
+#include "wme.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
-               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-               skb->priority = 2;
+               skb->priority = 256 + 2;
                break;
        default:
-               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-               skb->priority = 5;
+               skb->priority = 256 + 5;
                break;
        }
+       skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
 
        /*
         * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
index f353d9db54bc1f049e14b20713af88bde1da3c62..25ba24bef8f51f669ccbcc95cfcb5f1d235e39a9 100644 (file)
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 {
        struct ieee80211_local *local = tx->local;
        struct ieee80211_if_managed *ifmgd;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
        /* driver doesn't support power save */
        if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
                return TX_CONTINUE;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+               return TX_CONTINUE;
+
        ifmgd = &tx->sdata->u.mgd;
 
        /*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                        sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
        if (invoke_tx_handlers_early(&tx))
-               return false;
+               return true;
 
        if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
                return true;
index b4bdf9eda7b740dccb6501c5b0155c32d17d7e5b..247b89784a6fb41141bb20cc4d4e5987b33e17a7 100644 (file)
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE        ( \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
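
The TCP_NLATTR_SIZE fix above is subtle: sizeof(sizeof(struct nf_ct_tcp_flags)) evaluates to the size of a size_t rather than the size of the structure, so the old macro sized the attributes incorrectly. A standalone demonstration of the difference, using a stand-in structure of the same shape:

#include <stdio.h>

struct tcp_flags_demo {         /* stand-in: two one-byte fields, like nf_ct_tcp_flags */
        unsigned char flags;
        unsigned char mask;
};

int main(void)
{
        printf("sizeof(struct)         = %zu\n", sizeof(struct tcp_flags_demo));         /* 2 */
        printf("sizeof(sizeof(struct)) = %zu\n", sizeof(sizeof(struct tcp_flags_demo))); /* typically 8 */
        return 0;
}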
index 5af74b37f4236ec0402ac41360f04a5000ae3126..a35fb59ace7326324811a21704eb2932e2ca5a4c 100644 (file)
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
        priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
        err = nft_validate_register_store(ctx, priv->dreg, NULL,
-                                         NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+                                         NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
        if (err < 0)
                return err;
 
index 55e2d9215c0d4fe488c0d78c5ce41979e9098852..0e5ec126f6ad0516acf0576f01c4430dec43aec8 100644 (file)
@@ -355,12 +355,11 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+       struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb = NULL;
-       struct rb_node *node, *prev = NULL;
-       struct nft_rbtree_elem *rbe;
        struct nft_rbtree *priv;
+       struct rb_node *node;
        struct nft_set *set;
-       int i;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (nft_rbtree_interval_end(rbe)) {
-                       prev = node;
+                       rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
                if (nft_set_elem_mark_busy(&rbe->ext))
                        continue;
 
+               if (rbe_prev) {
+                       rb_erase(&rbe_prev->node, &priv->root);
+                       rbe_prev = NULL;
+               }
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
                        break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
+               rbe_prev = rbe;
 
-               if (prev) {
-                       rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (rbe_end) {
                        atomic_dec(&set->nelems);
-                       nft_set_gc_batch_add(gcb, rbe);
-                       prev = NULL;
+                       nft_set_gc_batch_add(gcb, rbe_end);
+                       rb_erase(&rbe_end->node, &priv->root);
+                       rbe_end = NULL;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
-       if (gcb) {
-               for (i = 0; i < gcb->head.cnt; i++) {
-                       rbe = gcb->elems[i];
-                       rb_erase(&rbe->node, &priv->root);
-               }
-       }
+       if (rbe_prev)
+               rb_erase(&rbe_prev->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
 
index 0472f34728423ac1a3ba839a72e4aab167df1091..ada144e5645bb3075b36b5c4fd23a1bb9020c874 100644 (file)
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
index 86a75105af1a2726bc52e44e6c3ac691d719999f..0aeb34c6389dcf1328a9877481d532284eb79433 100644 (file)
@@ -1624,10 +1624,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                OVS_NLERR(log, "Failed to allocate conntrack template");
                return -ENOMEM;
        }
-
-       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
-       nf_conntrack_get(&ct_info.ct->ct_general);
-
        if (helper) {
                err = ovs_ct_add_helper(&ct_info, helper, key, log);
                if (err)
@@ -1639,6 +1635,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
        if (err)
                goto err_free_ct;
 
+       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+       nf_conntrack_get(&ct_info.ct->ct_general);
        return 0;
 err_free_ct:
        __ovs_ct_free_action(&ct_info);
index c9755871042159bdf32a7bd980d8ee4d3a9a51bf..ef9554131434496ae02ab6c47418c13ebbc3b239 100644 (file)
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
 struct rxrpc_connection;
 
 /*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark.  skb->priority is used
+ * to pass supplementary information.
  */
 enum rxrpc_skb_mark {
-       RXRPC_SKB_MARK_DATA,            /* data message */
-       RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
-       RXRPC_SKB_MARK_BUSY,            /* server busy message */
-       RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
-       RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
-       RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
-       RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
-       RXRPC_SKB_MARK_NEW_CALL,        /* local error message */
+       RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
+       RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
 
 /*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
        struct hlist_node       hash_link;
        struct rxrpc_local      *local;
        struct hlist_head       error_targets;  /* targets for net error distribution */
-       struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
        struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
        unsigned int            maxdata;        /* data size (MTU - hdrsize) */
        unsigned short          hdrsize;        /* header size (IP + UDP + RxRPC) */
        int                     debug_id;       /* debug ID for printks */
-       int                     error_report;   /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
        struct sockaddr_rxrpc   srx;            /* remote address */
 
        /* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
        u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
 };
 
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+       return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+       return !rxrpc_to_server(sp);
+}
+
 /*
  * Flags in call->flags.
  */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+                                          struct rxrpc_sock *,
+                                          struct rxrpc_peer *,
                                           struct rxrpc_connection *,
                                           struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-                                                  struct sk_buff *);
+                                                  struct sk_buff *,
+                                                  struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
                        rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
                                     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-                                             struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
index 9d1e298b784c8b595626ec0b8f5af0f14e7e03a4..9c7f26d06a52f36d98bc78df68e682f51f8eb9e6 100644 (file)
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
+                                                   struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
-       struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                return NULL;
 
        if (!conn) {
-               /* No connection.  We're going to need a peer to start off
-                * with.  If one doesn't yet exist, use a spare from the
-                * preallocation set.  We dump the address into the spare in
-                * anticipation - and to save on stack space.
-                */
-               xpeer = b->peer_backlog[peer_tail];
-               if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-                       return NULL;
-
-               peer = rxrpc_lookup_incoming_peer(local, xpeer);
-               if (peer == xpeer) {
+               if (peer && !rxrpc_get_peer_maybe(peer))
+                       peer = NULL;
+               if (!peer) {
+                       peer = b->peer_backlog[peer_tail];
+                       if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+                               return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
+
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+                                          struct rxrpc_sock *rx,
+                                          struct rxrpc_peer *peer,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct rxrpc_sock *rx;
        struct rxrpc_call *call;
-       u16 service_id = sp->hdr.serviceId;
 
        _enter("");
 
-       /* Get the socket providing the service */
-       rx = rcu_dereference(local->service);
-       if (rx && (service_id == rx->srx.srx_service ||
-                  service_id == rx->second_service))
-               goto found_service;
-
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
-       skb->priority = RX_INVALID_OPERATION;
-       _leave(" = NULL [service]");
-       return NULL;
-
-found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }
 
-       call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
        if (!call) {
-               skb->mark = RXRPC_SKB_MARK_BUSY;
+               skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
index 9486293fef5c6f98c96397fc90eb14eecf332196..799f75b6900ddc4a7a5aecf87325b355ebbbcecc 100644 (file)
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        rcu_assign_pointer(conn->channels[chan].call, call);
 
        spin_lock(&conn->params.peer->lock);
-       hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);
 
        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
index f8f37188a9322829b8f4277c09b7329d2f4c1da0..8acf74fe24c03646916c1b69cddf8c7be3f79d43 100644 (file)
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
        }
 
        spin_lock_bh(&call->conn->params.peer->lock);
-       hlist_add_head(&call->error_link,
-                      &call->conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link,
+                          &call->conn->params.peer->error_targets);
        spin_unlock_bh(&call->conn->params.peer->lock);
 
 out:
index 77440a356b14ae60e875fcd94a2613227fd899cf..885dae829f4a1a1690334f0a6a3375d34d79bffb 100644 (file)
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-                                                  struct sk_buff *skb)
+                                                  struct sk_buff *skb,
+                                                  struct rxrpc_peer **_peer)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
                goto not_found;
 
-       k.epoch = sp->hdr.epoch;
-       k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
-
        /* We may have to handle mixing IPv4 and IPv6 */
        if (srx.transport.family != local->srx.transport.family) {
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
 
-       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+       if (rxrpc_to_server(sp)) {
                /* We need to look up service connections by the full protocol
                 * parameter set.  We look up the peer first as an intermediate
                 * step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                peer = rxrpc_lookup_peer_rcu(local, &srx);
                if (!peer)
                        goto not_found;
+               *_peer = peer;
                conn = rxrpc_find_service_conn_rcu(peer, skb);
                if (!conn || atomic_read(&conn->usage) == 0)
                        goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        call->peer->cong_cwnd = call->cong_cwnd;
 
        spin_lock_bh(&conn->params.peer->lock);
-       hlist_del_init(&call->error_link);
+       hlist_del_rcu(&call->error_link);
        spin_unlock_bh(&conn->params.peer->lock);
 
        if (rxrpc_is_client_call(call))
index cfdc199c63510255c1d8cd60baed3c5f66b93d28..800f5b8a1baa04ec2062a975cc501875ece9eb43 100644 (file)
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
                if (!skb)
                        continue;
 
+               sent_at = skb->tstamp;
+               smp_rmb(); /* Read timestamp before serial. */
                sp = rxrpc_skb(skb);
                if (sp->hdr.serial != orig_serial)
                        continue;
-               smp_rmb();
-               sent_at = skb->tstamp;
                goto found;
        }
+
        return;
 
 found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = udp_sk->sk_user_data;
+       struct rxrpc_peer *peer = NULL;
+       struct rxrpc_sock *rx = NULL;
        struct sk_buff *skb;
        unsigned int channel;
-       int ret, skew;
+       int ret, skew = 0;
 
        _enter("%p", udp_sk);
 
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
                return;
        }
 
+       if (skb->tstamp == 0)
+               skb->tstamp = ktime_get_real();
+
        rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
        _net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
        trace_rxrpc_rx_packet(sp);
 
-       _net("Rx RxRPC %s ep=%x call=%x:%x",
-            sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
-            sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
-       if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
-           !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
-               _proto("Rx Bad Packet Type %u", sp->hdr.type);
-               goto bad_message;
-       }
-
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
-               if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+               if (rxrpc_to_client(sp))
                        goto discard;
                rxrpc_post_packet_to_local(local, skb);
                goto out;
 
        case RXRPC_PACKET_TYPE_BUSY:
-               if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+               if (rxrpc_to_server(sp))
                        goto discard;
                /* Fall through */
+       case RXRPC_PACKET_TYPE_ACK:
+       case RXRPC_PACKET_TYPE_ACKALL:
+               if (sp->hdr.callNumber == 0)
+                       goto bad_message;
+               /* Fall through */
+       case RXRPC_PACKET_TYPE_ABORT:
+               break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0)
+               if (sp->hdr.callNumber == 0 ||
+                   sp->hdr.seq == 0)
                        goto bad_message;
                if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
                    !rxrpc_validate_jumbo(skb))
                        goto bad_message;
                break;
 
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+               if (rxrpc_to_server(sp))
+                       goto discard;
+               break;
+       case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_to_client(sp))
+                       goto discard;
+               break;
+
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
                goto discard;
+
+       default:
+               _proto("Rx Bad Packet Type %u", sp->hdr.type);
+               goto bad_message;
        }
 
+       if (sp->hdr.serviceId == 0)
+               goto bad_message;
+
        rcu_read_lock();
 
-       conn = rxrpc_find_connection_rcu(local, skb);
+       if (rxrpc_to_server(sp)) {
+               /* Weed out packets to services we're not offering.  Packets
+                * that would begin a call are explicitly rejected and the rest
+                * are just discarded.
+                */
+               rx = rcu_dereference(local->service);
+               if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+                           sp->hdr.serviceId != rx->second_service)) {
+                       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+                           sp->hdr.seq == 1)
+                               goto unsupported_service;
+                       goto discard_unlock;
+               }
+       }
+
+       conn = rxrpc_find_connection_rcu(local, skb, &peer);
        if (conn) {
                if (sp->hdr.securityIndex != conn->security_ix)
                        goto wrong_security;
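
The service-weeding block above boils down to a simple predicate: the local endpoint must have a service socket bound, and the packet's serviceId must match one of the two service IDs that socket offers. Packets that would start a new call (DATA with seq 1) to an unknown service get an explicit RX_INVALID_OPERATION abort via the unsupported_service label; everything else is silently dropped. The helper below is purely illustrative and is not part of the patch:

    /* Illustrative only: does the listening socket offer this service ID? */
    static bool rxrpc_service_is_offered(const struct rxrpc_sock *rx, u16 service_id)
    {
            return rx && (service_id == rx->srx.srx_service ||
                          service_id == rx->second_service);
    }
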
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                call = rcu_dereference(chan->call);
 
                if (sp->hdr.callNumber > chan->call_id) {
-                       if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+                       if (rxrpc_to_client(sp)) {
                                rcu_read_unlock();
                                goto reject_packet;
                        }
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
                                set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
                }
-       } else {
-               skew = 0;
-               call = NULL;
        }
 
        if (!call || atomic_read(&call->usage) == 0) {
-               if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
-                   sp->hdr.callNumber == 0 ||
+               if (rxrpc_to_client(sp) ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
                        goto bad_message_unlock;
                if (sp->hdr.seq != 1)
                        goto discard_unlock;
-               call = rxrpc_new_incoming_call(local, conn, skb);
+               call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
                if (!call) {
                        rcu_read_unlock();
                        goto reject_packet;
@@ -1340,6 +1371,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;
 
+unsupported_service:
+       rcu_read_unlock();
+       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         RX_INVALID_OPERATION, EOPNOTSUPP);
+       skb->priority = RX_INVALID_OPERATION;
+       goto post_abort;
+
 reupgrade:
        rcu_read_unlock();
        trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 protocol_error:
        skb->priority = RX_PROTOCOL_ERROR;
 post_abort:
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
index 777c3ed4cfc03d3923e052d95597926a1893a163..94d234e9c685fbe4324726df73800ed0f873e01b 100644 (file)
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        }
 
        switch (local->srx.transport.family) {
-       case AF_INET:
-               /* we want to receive ICMP errors */
+       case AF_INET6:
+               /* we want to receive ICMPv6 errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IP_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+               opt = IPV6_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
-               break;
 
-       case AF_INET6:
+               /* Fall through and set IPv4 options too otherwise we don't get
+                * errors from IPv4 packets sent through the IPv6 socket.
+                */
+
+       case AF_INET:
                /* we want to receive ICMP errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IPV6_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+               opt = IP_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
+
+               /* We want receive timestamps. */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+                                       (char *)&opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
                break;
 
        default:
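
The SO_TIMESTAMPNS option added at the end of this switch is what lets the receive path (see the skb->tstamp fallback added to rxrpc_data_ready() above) rely on the networking core stamping incoming packets instead of rxrpc reading the clock itself. Because AF_INET6 now falls through into the AF_INET case, a dual-stack socket picks up the ICMP/ICMPv6 error options, the don't-fragment settings and the timestamp option in one pass. A hedged sketch of requesting receive timestamps on a kernel socket of this era, using the same kernel_setsockopt() call as the hunk (not the patch's code):

    /* Sketch: ask the stack to timestamp received datagrams. */
    static int enable_rx_timestamps(struct socket *sock)
    {
            int opt = 1;

            return kernel_setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPNS,
                                     (char *)&opt, sizeof(opt));
    }
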
index ccf5de160444f4f08fa44310dbf4db29bfb8846d..e8fb8922bca838d145ca2c83a145ad5050aae6ea 100644 (file)
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
-       ktime_t now;
        size_t len, n;
        int ret;
        u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
-                * time, so we update the time after, on the assumption that
-                * the packet transmission is more likely to happen towards the
-                * end of the kernel_sendmsg() call.
+                * time.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       now = ktime_get_real();
-       if (ping)
-               call->ping_time = now;
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
+        *
+        * However, we mustn't request an ACK on the last reply packet of a
+        * service call, lest OpenAFS incorrectly send us an ACK with some
+        * soft-ACKs in it and then never follow up with a proper hard ACK.
         */
-       if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+       if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+            rxrpc_to_server(sp)
+            ) &&
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                goto send_fragmentable;
 
        down_read(&conn->params.local->defrag_sem);
+
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        /* send the packet by UDP
         * - returns -EMSGSIZE if UDP would have to fragment the packet
         *   to go out of the interface
@@ -413,12 +418,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
                            retrans, lost);
        if (ret >= 0) {
-               ktime_t now = ktime_get_real();
-               skb->tstamp = now;
-               smp_wmb();
-               sp->hdr.serial = serial;
                if (whdr.flags & RXRPC_REQUEST_ACK) {
-                       call->peer->rtt_last_req = now;
+                       call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                        if (call->peer->rtt_usage > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        down_write(&conn->params.local->defrag_sem);
 
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        struct kvec iov[2];
        size_t size;
        __be32 code;
-       int ret;
+       int ret, ioc;
 
        _enter("%d", local->debug_id);
 
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);
-       size = sizeof(whdr) + sizeof(code);
 
        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        msg.msg_flags = 0;
 
        memset(&whdr, 0, sizeof(whdr));
-       whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);
 
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_REJECT_BUSY:
+                       whdr.type = RXRPC_PACKET_TYPE_BUSY;
+                       size = sizeof(whdr);
+                       ioc = 1;
+                       break;
+               case RXRPC_SKB_MARK_REJECT_ABORT:
+                       whdr.type = RXRPC_PACKET_TYPE_ABORT;
+                       code = htonl(skb->priority);
+                       size = sizeof(whdr) + sizeof(code);
+                       ioc = 2;
+                       break;
+               default:
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       continue;
+               }
+
                if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
                        msg.msg_namelen = srx.transport_len;
 
-                       code = htonl(skb->priority);
-
                        whdr.epoch      = htonl(sp->hdr.epoch);
                        whdr.cid        = htonl(sp->hdr.cid);
                        whdr.callNumber = htonl(sp->hdr.callNumber);
index 4f9da2f51c694c3f93d3883476057377664b80e7..f3e6fc670da2339998992f0f0904e1f0b767ddd1 100644 (file)
@@ -23,6 +23,8 @@
 #include "ar-internal.h"
 
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+                                  enum rxrpc_call_completion);
 
 /*
  * Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
        rcu_read_unlock();
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
-       /* The ref we obtained is passed off to the work item */
-       __rxrpc_queue_peer_error(peer);
        _leave("");
 }
 
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
 static void rxrpc_store_error(struct rxrpc_peer *peer,
                              struct sock_exterr_skb *serr)
 {
+       enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
        struct sock_extended_err *ee;
        int err;
 
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
        case SO_EE_ORIGIN_NONE:
        case SO_EE_ORIGIN_LOCAL:
                _proto("Rx Received local error { error=%d }", err);
-               err += RXRPC_LOCAL_ERROR_OFFSET;
+               compl = RXRPC_CALL_LOCAL_ERROR;
                break;
 
        case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
                break;
        }
 
-       peer->error_report = err;
+       rxrpc_distribute_error(peer, err, compl);
 }
 
 /*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
  */
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+                                  enum rxrpc_call_completion compl)
 {
-       struct rxrpc_peer *peer =
-               container_of(work, struct rxrpc_peer, error_distributor);
        struct rxrpc_call *call;
-       enum rxrpc_call_completion compl;
-       int error;
-
-       _enter("");
-
-       error = READ_ONCE(peer->error_report);
-       if (error < RXRPC_LOCAL_ERROR_OFFSET) {
-               compl = RXRPC_CALL_NETWORK_ERROR;
-       } else {
-               compl = RXRPC_CALL_LOCAL_ERROR;
-               error -= RXRPC_LOCAL_ERROR_OFFSET;
-       }
 
-       _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
-       spin_lock_bh(&peer->lock);
-
-       while (!hlist_empty(&peer->error_targets)) {
-               call = hlist_entry(peer->error_targets.first,
-                                  struct rxrpc_call, error_link);
-               hlist_del_init(&call->error_link);
+       hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
                rxrpc_see_call(call);
-
-               if (rxrpc_set_call_completion(call, compl, 0, -error))
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   rxrpc_set_call_completion(call, compl, 0, -error))
                        rxrpc_notify_socket(call);
        }
-
-       spin_unlock_bh(&peer->lock);
-
-       rxrpc_put_peer(peer);
-       _leave("");
 }
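
This rewrite only works because rxrpc_disconnect_call(), earlier in the patch, now unlinks calls with hlist_del_rcu() instead of hlist_del_init(): the error distribution can then walk peer->error_targets locklessly under RCU while calls drop off the list concurrently, and the error_distributor work item with its extra peer reference becomes unnecessary. A minimal, hedged sketch of the underlying pattern (illustrative names, not the patch's code):

    /* Sketch: walk an RCU-protected hlist while entries may be unlinked concurrently. */
    static void visit_error_targets(struct rxrpc_peer *peer,
                                    void (*visit)(struct rxrpc_call *call))
    {
            struct rxrpc_call *call;

            rcu_read_lock();
            hlist_for_each_entry_rcu(call, &peer->error_targets, error_link)
                    visit(call);
            rcu_read_unlock();
    }

    /* Unlink side: the _rcu removal leaves the entry's forward pointer intact,
     * so a concurrent reader that has already reached it can still step past it.
     */
    static void drop_error_target(struct rxrpc_peer *peer, struct rxrpc_call *call)
    {
            spin_lock_bh(&peer->lock);
            hlist_del_rcu(&call->error_link);
            spin_unlock_bh(&peer->lock);
    }
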
 
 /*
index 1dc7648e3eff34f25ceea7b0edbd74b5f8cd02b3..01a9febfa36714da7293c1b9b5a5235d0947f8d0 100644 (file)
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_net *rxnet = local->rxnet;
 
        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-                       if (atomic_read(&peer->usage) == 0)
-                               return NULL;
+               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+                   atomic_read(&peer->usage) > 0)
                        return peer;
-               }
        }
 
        return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                atomic_set(&peer->usage, 1);
                peer->local = local;
                INIT_HLIST_HEAD(&peer->error_targets);
-               INIT_WORK(&peer->error_distributor,
-                         &rxrpc_peer_error_distributor);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer.  The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer.  There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-                                             struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-       struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
-       hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-       prealloc->local = local;
-       rxrpc_init_peer(prealloc, hash_key);
+       hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+       peer->local = local;
+       rxrpc_init_peer(peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
-
-       /* Need to check that we aren't racing with someone else */
-       peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-       if (peer && !rxrpc_get_peer_maybe(peer))
-               peer = NULL;
-       if (!peer) {
-               peer = prealloc;
-               hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-       }
-
+       hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+       list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
-       return peer;
 }
 
 /*
@@ -415,21 +400,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        return peer;
 }
 
-/*
- * Queue a peer record.  This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-       const void *here = __builtin_return_address(0);
-       int n;
-
-       n = atomic_read(&peer->usage);
-       if (rxrpc_queue_work(&peer->error_distributor))
-               trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-       else
-               rxrpc_put_peer(peer);
-}
-
 /*
  * Discard a peer record.
  */
index 93da73bf709857bbd48b2859092175bf43df8dfd..f9cb83c938f35d4ad8e381658da53e8f2494ff6a 100644 (file)
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
 #define RXRPC_PACKET_TYPE_10           10      /* Ignored */
 #define RXRPC_PACKET_TYPE_11           11      /* Ignored */
 #define RXRPC_PACKET_TYPE_VERSION      13      /* version string request */
-#define RXRPC_N_PACKET_TYPES           14      /* number of packet types (incl type 0) */
 
        uint8_t         flags;          /* packet flags */
 #define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-#define RXRPC_SUPPORTED_PACKET_TYPES (                 \
-               (1 << RXRPC_PACKET_TYPE_DATA) |         \
-               (1 << RXRPC_PACKET_TYPE_ACK) |          \
-               (1 << RXRPC_PACKET_TYPE_BUSY) |         \
-               (1 << RXRPC_PACKET_TYPE_ABORT) |        \
-               (1 << RXRPC_PACKET_TYPE_ACKALL) |       \
-               (1 << RXRPC_PACKET_TYPE_CHALLENGE) |    \
-               (1 << RXRPC_PACKET_TYPE_RESPONSE) |     \
-               /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */   \
-               (1 << RXRPC_PACKET_TYPE_PARAMS) |       \
-               (1 << RXRPC_PACKET_TYPE_10) |           \
-               (1 << RXRPC_PACKET_TYPE_11) |           \
-               (1 << RXRPC_PACKET_TYPE_VERSION))
-
 /*****************************************************************************/
 /*
  * jumbo packet secondary header
index 23273b5303fd9dcc68cf09ee6f701defe50580b5..8525de8116163a05257753aa00117e20a8b2cc42 100644 (file)
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+       if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
index d74d00b299421a940766f157bf8b48038b827cd1..42191ed9902b8dd38ad41b6221bd4210427b193b 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
                if (!ctx->packet || !ctx->packet->has_cookie_echo)
                        return;
 
-               /* fallthru */
+               /* fall through */
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
index 418f03d0be90f076cf34b1ee96495ceb3a3d68de..645c160520529271cc00e9df50ba086a4feb7fbc 100644 (file)
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
        switch (evt) {
        case NETDEV_CHANGE:
-               if (netif_carrier_ok(dev))
+               if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+                       test_and_set_bit_lock(0, &b->up);
                        break;
-               /* else: fall through */
-       case NETDEV_UP:
-               test_and_set_bit_lock(0, &b->up);
-               break;
+               }
+               /* fall through */
        case NETDEV_GOING_DOWN:
                clear_bit_unlock(0, &b->up);
                tipc_reset_bearer(net, b);
                break;
+       case NETDEV_UP:
+               test_and_set_bit_lock(0, &b->up);
+               break;
        case NETDEV_CHANGEMTU:
                if (tipc_mtu_bad(dev, 0)) {
                        bearer_disable(net, b);
index b1f0bee54eacc9eb1974169853abf1ace4df2733..fb886b525d950e18f7ef517bac408272d17e8d4e 100644 (file)
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
        return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+       return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
+       spin_lock_bh(&l->wakeupq.lock);
+       spin_lock_bh(&l->inputq->lock);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       spin_unlock_bh(&l->inputq->lock);
+       spin_unlock_bh(&l->wakeupq.lock);
+
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
-       skb_queue_splice_init(&l->wakeupq, l->inputq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        __skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       u32 onode = tipc_own_addr(l->net);
+       struct tipc_msg *hdr, *ihdr;
+       struct sk_buff_head tnlq;
+       struct sk_buff *skb;
+       u32 dnode = l->addr;
+
+       skb_queue_head_init(&tnlq);
+       skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+                             INT_H_SIZE, BASIC_H_SIZE,
+                             dnode, onode, 0, 0, 0);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+
+       hdr = buf_msg(skb);
+       msg_set_msgcnt(hdr, 1);
+       msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+       ihdr = (struct tipc_msg *)msg_data(hdr);
+       tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                     BASIC_H_SIZE, dnode);
+       msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+       __skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
                        return false;
                if (session != curr_session)
                        return false;
+               /* Extra sanity check */
+               if (!link_is_up(l) && msg_ack(hdr))
+                       return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
index 7bc494a33fdf1c3cdf8feb04b44db7e6e04a349c..90488c538a4e4edaddfaf441a517f8b29c1d2ebc 100644 (file)
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+                                   struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
index 68014f1b69765269236ac0a6d839ca754d1235da..2afc4f8c37a74db4896508283f434909a0151732 100644 (file)
@@ -111,6 +111,7 @@ struct tipc_node {
        int action_flags;
        struct list_head list;
        int state;
+       bool failover_sent;
        u16 sync_point;
        int link_cnt;
        u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+               n->failover_sent = false;
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
                tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool reset = true;
        char *if_name;
        unsigned long intv;
+       u16 session;
 
        *dupl_addr = false;
        *respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                        goto exit;
 
                if_name = strchr(b->name, ':') + 1;
+               get_random_bytes(&session, sizeof(u16));
                if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
                                      b->net_plane, b->mtu, b->priority,
-                                     b->window, mod(tipc_net(net)->random),
+                                     b->window, session,
                                      tipc_own_addr(net), addr, peer_id,
                                      n->capabilities,
                                      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                        tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
                                                        tipc_link_inputq(l));
                }
+               /* If parallel link was already down, and this happened before
+                * the tunnel link came up, FAILOVER was never sent. Ensure that
+                * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+                */
+               if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+                       tipc_link_create_dummy_tnl_msg(l, xmitq);
+                       n->failover_sent = true;
+               }
                /* If pkts arrive out of order, use lowest calculated syncpt */
                if (less(syncpt, n->sync_point))
                        n->sync_point = syncpt;
index 3f03ddd0e35b2f1b6acad1c788faee1976924b3b..b6f99b021d09b19a6fe0d6776d2c824a913cdacb 100644 (file)
@@ -1419,8 +1419,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
        /* Handle implicit connection setup */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dlen);
-               if (dlen && (dlen == rc))
+               if (dlen && dlen == rc) {
+                       tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
                        tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+               }
                return rc;
        }
 
index 4b8ec659e797ff743267773e315c6220b90993d0..176edfefcbaa89d826c322eb52cfc4e12129dbd8 100644 (file)
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
                        return false;
 
                /* check availability */
+               ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
                if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
                        mcs[ridx] |= rbit;
                else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        s32 last, low, high;
        u32 hyst;
-       int i, n;
+       int i, n, low_index;
        int err;
 
        /* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (last < wdev->cqm_config->rssi_thresholds[i])
                        break;
 
-       low = i > 0 ?
-               (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
-       high = i < n ?
-               (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+       low_index = i - 1;
+       if (low_index >= 0) {
+               low_index = array_index_nospec(low_index, n);
+               low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+       } else {
+               low = S32_MIN;
+       }
+       if (i < n) {
+               i = array_index_nospec(i, n);
+               high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+       } else {
+               high = S32_MAX;
+       }
 
        return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
 }
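
Both nl80211 hunks above are Spectre-v1 hardening rather than functional changes: after the ordinary bounds check, array_index_nospec() clamps the index so that even speculative execution cannot use an out-of-range value to read past the array. A generic, hedged sketch of the idiom (not the patch's code):

    #include <linux/nospec.h>

    /* Sketch: sanitise a user-influenced index after the bounds check so that
     * speculation cannot read table[] out of bounds.
     */
    static int read_entry(const int *table, size_t nr_entries, size_t idx)
    {
            if (idx >= nr_entries)
                    return -EINVAL;

            idx = array_index_nospec(idx, nr_entries);
            return table[idx];
    }
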
index 2f702adf2912105947560d07d79ff86119333bbf..765dedb123618c0699537a566d0066d9e9072a58 100644 (file)
@@ -2867,6 +2867,7 @@ static int regulatory_hint_core(const char *alpha2)
        request->alpha2[0] = alpha2[0];
        request->alpha2[1] = alpha2[1];
        request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->wiphy_idx = WIPHY_IDX_INVALID;
 
        queue_regulatory_request(request);
 
index d36c3eb7b9311fc75bdaa020aa0318546efd4128..d0e7472dd9fd4b2a8938334129f24a60ea3fb421 100644 (file)
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
        return NULL;
 }
 
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-                        struct ieee80211_channel *channel)
+                        struct ieee80211_channel *channel,
+                        enum nl80211_bss_scan_width scan_width)
 {
        const u8 *tmp;
        u32 freq;
        int channel_number = -1;
+       struct ieee80211_channel *alt_channel;
 
        tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
        if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
                }
        }
 
-       if (channel_number < 0)
+       if (channel_number < 0) {
+               /* No channel information in frame payload */
                return channel;
+       }
 
        freq = ieee80211_channel_to_frequency(channel_number, channel->band);
-       channel = ieee80211_get_channel(wiphy, freq);
-       if (!channel)
-               return NULL;
-       if (channel->flags & IEEE80211_CHAN_DISABLED)
+       alt_channel = ieee80211_get_channel(wiphy, freq);
+       if (!alt_channel) {
+               if (channel->band == NL80211_BAND_2GHZ) {
+                       /*
+                        * Better not allow unexpected channels when that could
+                        * be going beyond the 1-11 range (e.g., discovering
+                        * BSS on channel 12 when radio is configured for
+                        * channel 11).
+                        */
+                       return NULL;
+               }
+
+               /* No match for the payload channel number - ignore it */
+               return channel;
+       }
+
+       if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+           scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+               /*
+                * Ignore channel number in 5 and 10 MHz channels where there
+                * may not be an n:1 or 1:n mapping between frequencies and
+                * channel numbers.
+                */
+               return channel;
+       }
+
+       /*
+        * Use the channel determined through the payload channel number
+        * instead of the RX channel reported by the driver.
+        */
+       if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
                return NULL;
-       return channel;
+       return alt_channel;
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
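
As a concrete illustration of the logic above (not part of the patch): a 2.4 GHz Beacon frame picked up on channel 11 whose DSSS Parameter Set element says channel 6 is re-homed to 2437 MHz via ieee80211_channel_to_frequency(); if the element instead named a channel the wiphy does not support at all, ieee80211_get_channel() returns NULL and, on the 2.4 GHz band, the scan entry is dropped rather than attributed to an unexpected channel. A hedged sketch of pulling the channel number out of the element, mirroring the cfg80211_find_ie() call in the hunk (the helper name is illustrative):

    /* Sketch: extract the operating channel from a DSSS Parameter Set element. */
    static int bss_ds_channel(const u8 *ies, size_t ielen)
    {
            const u8 *elem = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ielen);

            return (elem && elem[1] == 1) ? elem[2] : -1;
    }
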
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
                    (data->signal < 0 || data->signal > 100)))
                return NULL;
 
-       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+                                          data->scan_width);
        if (!channel)
                return NULL;
 
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-                                          ielen, data->chan);
+                                          ielen, data->chan, data->scan_width);
        if (!channel)
                return NULL;
 
index b89c9c7f8c5c12a13772ae4d838ce9f121bd51f5..be3520e429c9f989a712f7bf32874bed7d3aa667 100644 (file)
@@ -458,6 +458,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
+               crypto_done = false;
        } while (!err);
 
        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
index 45ba07ab3e4f8d322e564c902774706ec09bcf9c..261995d37ced3a8f0c3ae443a4fca7f511e1a3c4 100644 (file)
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                spin_unlock_bh(&x->lock);
 
                skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       goto error_nolock;
+               }
 
                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
index 3110c3fbee2099e7a4563a99c988e5ad66d0658c..f094d4b3520d97773b87baf5700df79fc8ca4666 100644 (file)
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
+       if (!skb_dst(skb)) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+               return 0;
+       }
 
        dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
index 4791aa8b818583b5fcb5812fd561342fbab2edfa..df7ca2dabc48881eb607089bb17a19b1bc6028b1 100644 (file)
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       goto out;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       goto out;
+
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        switch (p->sel.family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       return -EINVAL;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       return -EINVAL;
+
                break;
 #else
                return  -EAFNOSUPPORT;
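
The new checks in both verify functions above encode the same rule: a selector prefix length supplied by userspace must not exceed the address width of the selector's family, otherwise it would reach the address-matching helpers unchecked. A hedged helper expressing that rule (illustrative only, not the patch's code):

    /* Sketch: a selector prefix length must not exceed the address width. */
    static bool xfrm_sel_prefixlen_ok(u16 family, u8 plen_d, u8 plen_s)
    {
            switch (family) {
            case AF_INET:
                    return plen_d <= 32 && plen_s <= 32;
            case AF_INET6:
                    return plen_d <= 128 && plen_s <= 128;
            default:
                    return false;
            }
    }
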
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                    (ut[i].family != prev_family))
                        return -EINVAL;
 
+               if (ut[i].mode >= XFRM_MODE_MAX)
+                       return -EINVAL;
+
                prev_family = ut[i].family;
 
                switch (ut[i].family) {
index 642d4e12abea2e37d92e0acea3fdcf692a64d374..eec2663261f2ac8bfc09085049ea0d908f8851c5 100644 (file)
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
                        printf(fmt, ## __VA_ARGS__);    \
        } while (0)
 
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
 
 #define INJECT_ASM_REG "eax"
 
 #define RSEQ_INJECT_CLOBBER \
        , INJECT_ASM_REG
 
-#ifdef __i386__
-
 #define RSEQ_INJECT_ASM(n) \
        "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #elif defined(__x86_64__)
 
+#define INJECT_ASM_REG_P       "rax"
+#define INJECT_ASM_REG         "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG_P \
+       , INJECT_ASM_REG
+
 #define RSEQ_INJECT_ASM(n) \
-       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
-       "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+       "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
        "jz 333f\n\t" \
        "222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
        "jnz 222b\n\t" \
        "333:\n\t"
 
-#else
-#error "Unsupported architecture"
-#endif
-
 #elif defined(__s390__)
 
 #define RSEQ_INJECT_INPUT \