asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge tag 'amd-drm-fixes-5.6-2020-03-19' of git://people.freedesktop.org/~agd5f/linux...
author Dave Airlie <airlied@redhat.com>
Fri, 20 Mar 2020 02:48:10 +0000 (12:48 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 20 Mar 2020 02:48:17 +0000 (12:48 +1000)
amd-drm-fixes-5.6-2020-03-19:

amdgpu:
- Pageflip fix
- VCN clockgating fixes
- GPR debugfs fix for umr
- GPU reset fix
- eDP fix for MBP
- DCN2.x fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200319204054.1036478-1-alexander.deucher@amd.com
274 files changed:
.clang-format
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/filesystems/porting.rst
Documentation/networking/devlink/devlink-region.rst
Documentation/networking/net_failover.rst
Documentation/networking/rds.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/include/asm/fpu.h
arch/arc/include/asm/linkage.h
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/kernel/setup.c
arch/s390/kvm/kvm-s390.c
arch/x86/Makefile
arch/x86/crypto/Makefile
arch/x86/events/amd/uncore.c
arch/x86/include/asm/kvm_emulate.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/mce/intel.c
arch/x86/kernel/cpu/mce/therm_throt.c
arch/x86/kvm/Kconfig
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/ioremap.c
block/blk-iocost.c
block/blk-mq-sched.c
block/genhd.c
drivers/atm/nicstar.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/img-ascii-lcd.c
drivers/base/platform.c
drivers/block/virtio_blk.c
drivers/char/ipmi/ipmi_si_platform.c
drivers/clk/clk.c
drivers/clk/qcom/dispcc-sc7180.c
drivers/clk/qcom/videocc-sc7180.c
drivers/firmware/efi/efivars.c
drivers/gpu/drm/arm/display/komeda/komeda_drv.c
drivers/gpu/drm/bochs/bochs_hw.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_lease.c
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/i2c-core-acpi.c
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/irqchip/irq-gic-v3.c
drivers/macintosh/windfarm_ad7417_sensor.c
drivers/macintosh/windfarm_fcu_controls.c
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_lm87_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_smu_sat.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci-tegra.c
drivers/net/bonding/bond_alb.c
drivers/net/can/dev.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/fman.h
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/sfc/tx_common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/of/of_mdio.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-scu.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-falcon.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_int.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/ufs/ufshcd.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_ring.c
drivers/watchdog/iTCO_vendor.h
drivers/watchdog/iTCO_vendor_support.c
drivers/watchdog/iTCO_wdt.c
fs/afs/addr_list.c
fs/afs/internal.h
fs/cifs/dir.c
fs/crypto/keysetup.c
fs/fuse/dev.c
fs/fuse/fuse_i.h
fs/gfs2/inode.c
fs/inode.c
fs/io_uring.c
fs/nfs/client.c
fs/nfs/fs_context.c
fs/nfs/fscache.c
fs/nfs/namespace.c
fs/nfs/nfs4client.c
fs/open.c
fs/overlayfs/Kconfig
fs/overlayfs/file.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/overlayfs/util.c
include/crypto/curve25519.h
include/dt-bindings/clock/imx8mn-clock.h
include/linux/cgroup.h
include/linux/dmar.h
include/linux/fs.h
include/linux/futex.h
include/linux/genhd.h
include/linux/inet_diag.h
include/linux/intel-iommu.h
include/linux/mmc/host.h
include/linux/of_clk.h
include/linux/phy.h
include/linux/platform_device.h
include/linux/rhashtable.h
include/linux/workqueue.h
include/net/fib_rules.h
include/soc/mscc/ocelot_dev.h
include/uapi/linux/in.h
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/futex.c
kernel/pid.c
kernel/sys.c
kernel/trace/ftrace.c
kernel/workqueue.c
mm/memcontrol.c
net/batman-adv/bat_iv_ogm.c
net/caif/caif_dev.c
net/core/devlink.c
net/core/netclassid_cgroup.c
net/core/sock.c
net/dsa/dsa_priv.h
net/dsa/port.c
net/dsa/slave.c
net/ieee802154/nl_policy.c
net/ipv4/gre_demux.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/raw_diag.c
net/ipv4/udp_diag.c
net/ipv6/addrconf.c
net/ipv6/seg6_iptunnel.c
net/ipv6/seg6_local.c
net/mac80211/mesh_hwmp.c
net/mptcp/options.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_payload.c
net/netfilter/nft_tunnel.c
net/netfilter/x_tables.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/nfc/hci/core.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/sched/sch_fq.c
net/sched/sch_taprio.c
net/sctp/diag.c
net/smc/smc_ib.c
net/tipc/netlink.c
net/wireless/nl80211.c
tools/include/uapi/asm/errno.h
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/arm64/util/perf_regs.c
tools/perf/arch/powerpc/util/perf_regs.c
tools/perf/arch/x86/util/auxtrace.c
tools/perf/arch/x86/util/event.c
tools/perf/arch/x86/util/header.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/arch/x86/util/machine.c
tools/perf/arch/x86/util/perf_regs.c
tools/perf/arch/x86/util/pmu.c
tools/perf/bench/bench.h
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/builtin-diff.c
tools/perf/builtin-top.c
tools/perf/pmu-events/jevents.c
tools/perf/tests/bp_account.c
tools/perf/util/block-info.c
tools/perf/util/env.c
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/symbol.c
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/tc-testing/config

index 196ca317bd1f24ad57ad9ded549bc0b7994d8111..6ec5558b516bd909e2bb266208e08fdffee4364e 100644 (file)
--- a/.clang-format
+++ b/.clang-format
@@ -86,6 +86,8 @@ ForEachMacros:
   - 'bio_for_each_segment_all'
   - 'bio_list_for_each'
   - 'bip_for_each_vec'
+  - 'bitmap_for_each_clear_region'
+  - 'bitmap_for_each_set_region'
   - 'blkg_for_each_descendant_post'
   - 'blkg_for_each_descendant_pre'
   - 'blk_queue_for_each_rl'
@@ -115,6 +117,7 @@ ForEachMacros:
   - 'drm_client_for_each_connector_iter'
   - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
+  - 'drm_for_each_bridge_in_chain'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_encoder'
@@ -136,9 +139,10 @@ ForEachMacros:
   - 'for_each_bio'
   - 'for_each_board_func_rsrc'
   - 'for_each_bvec'
+  - 'for_each_card_auxs'
+  - 'for_each_card_auxs_safe'
   - 'for_each_card_components'
-  - 'for_each_card_links'
-  - 'for_each_card_links_safe'
+  - 'for_each_card_pre_auxs'
   - 'for_each_card_prelinks'
   - 'for_each_card_rtds'
   - 'for_each_card_rtds_safe'
@@ -166,6 +170,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_efi_handle'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -190,6 +195,7 @@ ForEachMacros:
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
+  - 'for_each_member'
   - 'for_each_memblock'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
@@ -200,9 +206,11 @@ ForEachMacros:
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
   - 'for_each_net'
+  - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
   - 'for_each_netdev_continue'
   - 'for_each_netdev_continue_rcu'
+  - 'for_each_netdev_continue_reverse'
   - 'for_each_netdev_feature'
   - 'for_each_netdev_in_bond_rcu'
   - 'for_each_netdev_rcu'
@@ -254,10 +262,10 @@ ForEachMacros:
   - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dai'
   - 'for_each_rtd_codec_dai_rollback'
-  - 'for_each_rtdcom'
-  - 'for_each_rtdcom_safe'
+  - 'for_each_rtd_components'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
+  - 'for_each_set_clump8'
   - 'for_each_sg'
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
@@ -267,6 +275,7 @@ ForEachMacros:
   - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
+  - 'for_each_wakeup_source'
   - 'for_each_zone'
   - 'for_each_zone_zonelist'
   - 'for_each_zone_zonelist_nodemask'
@@ -330,6 +339,7 @@ ForEachMacros:
   - 'list_for_each'
   - 'list_for_each_codec'
   - 'list_for_each_codec_safe'
+  - 'list_for_each_continue'
   - 'list_for_each_entry'
   - 'list_for_each_entry_continue'
   - 'list_for_each_entry_continue_rcu'
@@ -351,6 +361,7 @@ ForEachMacros:
   - 'llist_for_each_entry'
   - 'llist_for_each_entry_safe'
   - 'llist_for_each_safe'
+  - 'mci_for_each_dimm'
   - 'media_device_for_each_entity'
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
@@ -444,10 +455,16 @@ ForEachMacros:
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
   - 'xa_for_each_marked'
+  - 'xa_for_each_range'
   - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
+  - 'xbc_array_for_each_value'
+  - 'xbc_for_each_key_value'
+  - 'xbc_node_for_each_array_value'
+  - 'xbc_node_for_each_child'
+  - 'xbc_node_for_each_key_value'
   - 'zorro_for_each_dev'
 
 #IncludeBlocks: Preserve # Unknown to clang-format-5.0
index 9120e59578dcaaf24e5633cd0c1a2ec37b1ea278..2c08c628febdf3c7a17c2129047d9f53e246a779 100644 (file)
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -110,6 +110,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX GICv3  | #38539          | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #30115          | CAVIUM_ERRATUM_30115        |
index 250f8d8cdce4bc17d0acc1bdb3ee2bb7ecf3fb34..c00fb0d22c7b2355f47482d1fa93a2c118bacee3 100644 (file)
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -110,6 +110,13 @@ PROPERTIES
                Usage: required
                Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
 
+- fsl,erratum-a050385
+               Usage: optional
+               Value type: boolean
+               Definition: A boolean property. Indicates the presence of the
+               erratum A050385 which indicates that DMA transactions that are
+               split can result in a FMan lock.
+
 =============================================================================
 FMan MURAM Node
 
index f18506083ced831cf0ceef4870d964144152cd99..26c093969573639fc815803aa275d609057c7bfc 100644 (file)
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -850,3 +850,11 @@ business doing so.
 d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
 very suspect (and won't work in modules).  Such uses are very likely to
 be misspelled d_alloc_anon().
+
+---
+
+**mandatory**
+
+[should've been added in 2016] stale comment in finish_open() nonwithstanding,
+failure exits in ->atomic_open() instances should *NOT* fput() the file,
+no matter what.  Everything is handled by the caller.
index 1a7683e7acb217d9782af60eb5bedd9b6e6631e5..8b46e8591fe0f7fbc9b43d1f5f3f9e0e25250728 100644 (file)
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -40,9 +40,6 @@ example usage
     # Delete a snapshot using:
     $ devlink region del pci/0000:00:05.0/cr-space snapshot 1
 
-    # Trigger (request) a snapshot be taken:
-    $ devlink region trigger pci/0000:00:05.0/cr-space
-
     # Dump a snapshot:
     $ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
     0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
index 06c97dcb57caee07743c55d7a500b2a42f1ff0a8..e143ab79a960d429a431993690a7db76d0e2afe9 100644 (file)
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -8,9 +8,9 @@ Overview
 ========
 
 The net_failover driver provides an automated failover mechanism via APIs
-to create and destroy a failover master netdev and mananges a primary and
+to create and destroy a failover master netdev and manages a primary and
 standby slave netdevs that get registered via the generic failover
-infrastructrure.
+infrastructure.
 
 The failover netdev acts a master device and controls 2 slave devices. The
 original paravirtual interface is registered as 'standby' slave netdev and
@@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode
 =============================================
 
 net_failover enables hypervisor controlled accelerated datapath to virtio-net
-enabled VMs in a transparent manner with no/minimal guest userspace chanages.
+enabled VMs in a transparent manner with no/minimal guest userspace changes.
 
 To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
 feature on the virtio-net interface and assign the same MAC address to both
index f2a0147c933d1b7be6f9b09de38de2e8994e6e2f..eec61694e894d2728f39c23eb5efe88558aa72c0 100644 (file)
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -159,7 +159,7 @@ Socket Interface
        set SO_RDS_TRANSPORT on a socket for which the transport has
        been previously attached explicitly (by SO_RDS_TRANSPORT) or
        implicitly (via bind(2)) will return an error of EOPNOTSUPP.
-       An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will
+       An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will
        always return EINVAL.
 
 RDMA for RDS
index a6fbdf354d3437a50109102244c3a39c70ef5a16..cc1d18cb5d186e2995b5202d1a63903ed10f0183 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4073,7 +4073,6 @@ F:        drivers/scsi/snic/
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
 M:     Govindarajulu Varadarajan <_govind@gmx.com>
-M:     Parvi Kaustubhi <pkaustub@cisco.com>
 S:     Supported
 F:     drivers/net/ethernet/cisco/enic/
 
@@ -4572,7 +4571,7 @@ F:        drivers/infiniband/hw/cxgb4/
 F:     include/uapi/rdma/cxgb4-abi.h
 
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M:     Casey Leedom <leedom@chelsio.com>
+M:     Vishal Kulkarni <vishal@gmail.com>
 L:     netdev@vger.kernel.org
 W:     http://www.chelsio.com
 S:     Supported
@@ -6198,7 +6197,6 @@ S:        Supported
 F:     drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M:     Sathya Perla <sathya.perla@broadcom.com>
 M:     Ajit Khaparde <ajit.khaparde@broadcom.com>
 M:     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
 M:     Somnath Kotur <somnath.kotur@broadcom.com>
@@ -11119,7 +11117,7 @@ M:      Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
-Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
+Q:     https://patchwork.kernel.org/project/linux-mips/list/
 S:     Maintained
 F:     Documentation/devicetree/bindings/mips/
 F:     Documentation/mips/
index e25db579ce7438ab608d4473dc57dfc110ecaeee..171f2b004c8a4b001d3958660e8233b56b03b181 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index ff2a393b635c5681f5ce6c9edd1a1d20efd6ea10..7124ab82dfa31fb32f9701419fcd199e9f2569c2 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -154,7 +154,7 @@ config ARC_CPU_HS
        help
          Support for ARC HS38x Cores based on ARCv2 ISA
          The notable features are:
-           - SMP configurations of upto 4 core with coherency
+           - SMP configurations of up to 4 cores with coherency
            - Optional L2 Cache and IO-Coherency
            - Revised Interrupt Architecture (multiple priorites, reg banks,
                auto stack switch, auto regfile save/restore)
@@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
-         masters are parked until Master kicks them so they can start of
+         masters are parked until Master kicks them so they can start off
          at designated entry point. For other case, all jump to common
          entry point and spin wait for Master's signal.
 
index 07f26ed39f024158466f2b5d6f4c0cb2cf115060..f7a978dfdf1d3668660aad6c29468fa7e97fe06c 100644 (file)
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -21,8 +21,6 @@ CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_EZNPS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4096
index 5dd470b6609ebf2e061e1b48e98c86f5ef175056..bf39a0091679c766fa54f7670b4d351bf50dc6c8 100644 (file)
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
index 3532e86f7bff690b4906d8c7f9ea8e9d6a5db117..7121bd71c543ad3232e2ca97eb01a7ddda1e9921 100644 (file)
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set
index d90448bee064f3604631966dcd0e67253a44226f..f9863b294a707ef569bf36aa08592bf8ca48b5f2 100644 (file)
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
index 64347250fdf55e58453e90982e90182ee68b1fc4..006bcf88a7a5f21284ddb2fb015d4a75924b4490 100644 (file)
--- a/arch/arc/include/asm/fpu.h
+++ b/arch/arc/include/asm/fpu.h
@@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs);
 
 #endif /* !CONFIG_ISA_ARCOMPACT */
 
+struct task_struct;
+
 extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
 
 #else  /* !CONFIG_ARC_FPU_SAVE_RESTORE */
index d9ee43c6b7dbc2c698960365e8bbe78d0f087ddb..fe19f1d412e71896b324e28278fc52cae13e1a38 100644 (file)
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -29,6 +29,8 @@
 .endm
 
 #define ASM_NL          `      /* use '`' to mark new line in macro */
+#define __ALIGN                .align 4
+#define __ALIGN_STR    __stringify(__ALIGN)
 
 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
index e1c647490f00e91069671f5aeb9871830cb9be83..aa41af6ef4ac6eccca426d72810c726423391de8 100644 (file)
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -8,11 +8,11 @@
 #include <linux/delay.h>
 #include <linux/root_dev.h>
 #include <linux/clk.h>
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/of_clk.h>
 #include <linux/of_fdt.h>
 #include <linux/of.h>
 #include <linux/cache.h>
index b79886a6cec8a72398720fa839df2e2917aa523e..d2999503fb8a5f1095419a673bd317f4e4690c6c 100644 (file)
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address)
                        if (IS_ERR(nm))
                                nm = "?";
                }
-               pr_info("    @off 0x%lx in [%s]\n"
-                       "    VMA: 0x%08lx to 0x%08lx\n",
+               pr_info("  @off 0x%lx in [%s]  VMA: 0x%08lx to 0x%08lx\n",
                        vma->vm_start < TASK_UNMAPPED_BASE ?
                                address : address - vma->vm_start,
                        nm, vma->vm_start, vma->vm_end);
@@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs)
        unsigned int vec, cause_code;
        unsigned long address;
 
-       pr_info("\n[ECR   ]: 0x%08lx => ", regs->event);
-
        /* For Data fault, this is data address not instruction addr */
        address = current->thread.fault_address;
 
@@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs)
 
        /* For DTLB Miss or ProtV, display the memory involved too */
        if (vec == ECR_V_DTLB_MISS) {
-               pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
+               pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
                       (cause_code == 0x01) ? "Read" :
                       ((cause_code == 0x02) ? "Write" : "EX"),
-                      address, regs->ret);
+                      address, (void *)regs->ret);
        } else if (vec == ECR_V_ITLB_MISS) {
                pr_cont("Insn could not be fetched\n");
        } else if (vec == ECR_V_MACH_CHK) {
@@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs)
 
        show_ecr_verbose(regs);
 
-       pr_info("[EFA   ]: 0x%08lx\n[BLINK ]: %pS\n[ERET  ]: %pS\n",
-               current->thread.fault_address,
-               (void *)regs->blink, (void *)regs->ret);
-
        if (user_mode(regs))
                show_faulting_vma(regs->ret); /* faulting code, not data */
 
-       pr_info("[STAT32]: 0x%08lx", regs->status32);
+       pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+               regs->event, current->thread.fault_address, regs->ret);
+
+       pr_info("STAT32: 0x%08lx", regs->status32);
 
 #define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
 
 #ifdef CONFIG_ISA_ARCOMPACT
-       pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE),
                        STS_BIT(regs, A2), STS_BIT(regs, A1),
                        STS_BIT(regs, E2), STS_BIT(regs, E1));
 #else
-       pr_cont(" : %2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s]",
                        STS_BIT(regs, IE),
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE));
 #endif
-       pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
-               regs->bta, regs->sp, regs->fp);
+       pr_cont("  BTA: 0x%08lx\n", regs->bta);
+       pr_info("BLK: %pS\n SP: 0x%08lx  FP: 0x%08lx\n",
+               (void *)regs->blink, regs->sp, regs->fp);
        pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
               regs->lp_start, regs->lp_end, regs->lp_count);
 
index 6082ae02213642878e5a659c51fc6fe7fd506df2..d237162a874462604ba4a170cf068809d0d34836 100644 (file)
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
@@ -20,6 +20,8 @@ &soc {
 };
 
 &fman0 {
+       fsl,erratum-a050385;
+
        /* these aliases provide the FMan ports mapping */
        enet0: ethernet@e0000 {
        };
index 37b93166bf22d17947aa878657d269d84bbdd097..c340f947baa03a4d5e0cb8e0f79a3a72aeccd4e8 100644 (file)
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -4,6 +4,8 @@
 #include "jz4780.dtsi"
 #include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
 
 / {
        compatible = "img,ci20", "ingenic,jz4780";
@@ -163,63 +165,71 @@ act8600: act8600@5a {
 
                regulators {
                        vddcore: SUDCDC1 {
-                               regulator-name = "VDDCORE";
+                               regulator-name = "DCDC_REG1";
                                regulator-min-microvolt = <1100000>;
                                regulator-max-microvolt = <1100000>;
                                regulator-always-on;
                        };
                        vddmem: SUDCDC2 {
-                               regulator-name = "VDDMEM";
+                               regulator-name = "DCDC_REG2";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
                        vcc_33: SUDCDC3 {
-                               regulator-name = "VCC33";
+                               regulator-name = "DCDC_REG3";
                                regulator-min-microvolt = <3300000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_50: SUDCDC4 {
-                               regulator-name = "VCC50";
+                               regulator-name = "SUDCDC_REG4";
                                regulator-min-microvolt = <5000000>;
                                regulator-max-microvolt = <5000000>;
                                regulator-always-on;
                        };
                        vcc_25: LDO_REG5 {
-                               regulator-name = "VCC25";
+                               regulator-name = "LDO_REG5";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        wifi_io: LDO_REG6 {
-                               regulator-name = "WIFIIO";
+                               regulator-name = "LDO_REG6";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        vcc_28: LDO_REG7 {
-                               regulator-name = "VCC28";
+                               regulator-name = "LDO_REG7";
                                regulator-min-microvolt = <2800000>;
                                regulator-max-microvolt = <2800000>;
                                regulator-always-on;
                        };
                        vcc_15: LDO_REG8 {
-                               regulator-name = "VCC15";
+                               regulator-name = "LDO_REG8";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
-                       vcc_18: LDO_REG9 {
-                               regulator-name = "VCC18";
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
+                       vrtc_18: LDO_REG9 {
+                               regulator-name = "LDO_REG9";
+                               /* Despite the datasheet stating 3.3V
+                                * for REG9 and the driver expecting that,
+                                * REG9 outputs 1.8V.
+                                * Likely the CI20 uses a proprietary
+                                * factory programmed chip variant.
+                                * Since this is a simple on/off LDO the
+                                * exact values do not matter.
+                                */
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_11: LDO_REG10 {
-                               regulator-name = "VCC11";
-                               regulator-min-microvolt = <1100000>;
-                               regulator-max-microvolt = <1100000>;
+                               regulator-name = "LDO_REG10";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
                                regulator-always-on;
                        };
                };
@@ -261,7 +271,9 @@ &i2c4 {
                rtc@51 {
                        compatible = "nxp,pcf8563";
                        reg = <0x51>;
-                       interrupts = <110>;
+
+                       interrupt-parent = <&gpf>;
+                       interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
                };
 };
 
index 1ac2752fb7919ea26fb1caf5470ea3887887beb0..a7b469d89e2cc5e9679136d48a582356d56e0398 100644 (file)
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
         * If we're configured to take boot arguments from DT, look for those
         * now.
         */
-       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+           IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
                of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
 #endif
 
index d7ff30e45589935890ad1c1f67c0f36eaa4d480e..c2e6d4ba4e2369db946ecb6339045d91c1715c88 100644 (file)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        /* Initial reset is a superset of the normal reset */
        kvm_arch_vcpu_ioctl_normal_reset(vcpu);
 
-       /* this equals initial cpu reset in pop, but we don't switch to ESA */
+       /*
+        * This equals initial cpu reset in pop, but we don't switch to ESA.
+        * We do not only reset the internal data, but also ...
+        */
        vcpu->arch.sie_block->gpsw.mask = 0;
        vcpu->arch.sie_block->gpsw.addr = 0;
        kvm_s390_set_prefix(vcpu, 0);
@@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
        vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
        vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+
+       /* ... the data in sync regs */
+       memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
+       vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
+       vcpu->run->psw_addr = 0;
+       vcpu->run->psw_mask = 0;
+       vcpu->run->s.regs.todpr = 0;
+       vcpu->run->s.regs.cputm = 0;
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.pp = 0;
+       vcpu->run->s.regs.gbea = 1;
        vcpu->run->s.regs.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
index 94df0868804bcb4b9b37f7abf4985ac95158c4f8..513a55562d7508eaf75d3efc954b0a786956bbd1 100644 (file)
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
 sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
 sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
+adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
 
 KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
 
index b69e00bf20b82e4653cb7b563aa132efa43f0838..8c2e9eadee8a0ff349c324cdfc310e6910fad1b8 100644 (file)
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
 sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
 sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
+adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
 
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
 
 obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
 obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
-obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+
+# These modules require the assembler to support ADX.
+ifeq ($(adx_supported),yes)
+       obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+endif
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
index a6ea07f2aa8482d6ea0835b4a745245c039b352e..4d867a752f0ecd0f79656341edfba56fc1d40fc3 100644 (file)
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
 
        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
-        * that share the same NB / Last level cache. Interrupts can be directed
-        * to a single target core, however, event counts generated by processes
-        * running on other cores cannot be masked out. So we do not support
-        * sampling and per-thread events.
+        * that share the same NB / Last level cache.  On family 16h and below,
+        * Interrupts can be directed to a single target core, however, event
+        * counts generated by processes running on other cores cannot be masked
+        * out. So we do not support sampling and per-thread events via
+        * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
         */
-       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-               return -EINVAL;
-
-       /* and we do not enable counter overflow interrupts */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
index 2a8f2bd2e5cfe846cd82073fe7677695b05277e9..c06e8353efd3297ee44878b91f58798dd04b0664 100644 (file)
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -360,7 +360,6 @@ struct x86_emulate_ctxt {
        u64 d;
        unsigned long _eip;
        struct operand memop;
-       /* Fields above regs are cleared together. */
        unsigned long _regs[NR_VCPU_REGS];
        struct operand *memopp;
        struct fetch_cache fetch;
index 2c5676b0a6e7f72c70e6fbab2a8ae485e14e9220..48293d15f1e1d69705dfb7d0d1d780256db78e7c 100644 (file)
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
        bool managed = apicd->is_managed;
 
        /*
-        * This should never happen. Managed interrupts are not
-        * migrated except on CPU down, which does not involve the
-        * cleanup vector. But try to keep the accounting correct
-        * nevertheless.
+        * Managed interrupts are usually not migrated away
+        * from an online CPU, but CPU isolation 'managed_irq'
+        * can make that happen.
+        * 1) Activation does not take the isolation into account
+        *    to keep the code simple
+        * 2) Migration away from an isolated CPU can happen when
+        *    a non-isolated CPU which is in the calculated
+        *    affinity mask comes online.
         */
-       WARN_ON_ONCE(managed);
-
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
index 5627b1091b85639eb800a592d882832d81d1a941..f996ffb887bc09ca2ea64d221f14ef86f3c0006a 100644 (file)
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
                        return;
 
                if ((val & 3UL) == 1UL) {
-                       /* PPIN available but disabled: */
+                       /* PPIN locked in disabled mode */
                        return;
                }
 
-               /* If PPIN is disabled, but not locked, try to enable: */
-               if (!(val & 3UL)) {
+               /* If PPIN is disabled, try to enable */
+               if (!(val & 2UL)) {
                        wrmsrl_safe(MSR_PPIN_CTL,  val | 2UL);
                        rdmsrl_safe(MSR_PPIN_CTL, &val);
                }
 
-               if ((val & 3UL) == 2UL)
+               /* Is the enable bit set? */
+               if (val & 2UL)
                        set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
        }
 }
index 58b4ee3cda7774c096cbbac4841976344bbe79a0..f36dc07420851b90759349d7ffe3caf3eac34012 100644 (file)
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
 {
        struct thermal_state *state = &per_cpu(thermal_state, cpu);
        struct device *dev = get_cpu_device(cpu);
+       u32 l;
+
+       /* Mask the thermal vector before draining evtl. pending work */
+       l = apic_read(APIC_LVTTHMR);
+       apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
 
-       cancel_delayed_work(&state->package_throttle.therm_work);
-       cancel_delayed_work(&state->core_throttle.therm_work);
+       cancel_delayed_work_sync(&state->package_throttle.therm_work);
+       cancel_delayed_work_sync(&state->core_throttle.therm_work);
 
        state->package_throttle.rate_control_active = false;
        state->core_throttle.rate_control_active = false;
index 1bb4927030afd85081a862544c7e29717fdc0fca..9fea0757db9226653c611233470ae092fda6e762 100644 (file)
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -68,7 +68,7 @@ config KVM_WERROR
        depends on (X86_64 && !KASAN) || !COMPILE_TEST
        depends on EXPERT
        help
-         Add -Werror to the build flags for (and only for) i915.ko.
+         Add -Werror to the build flags for KVM.
 
          If in doubt, say "N".
 
index dd19fb3539e0b4b7d1c51581b90695c66d79abe2..bc00642e5d3b7b158cb410ed6d39a1724661904b 100644 (file)
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5173,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        ctxt->fetch.ptr = ctxt->fetch.data;
        ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
+       ctxt->intercept = x86_intercept_none;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
        else {
index 7668fed1ce6527767fbf6c8e10647c6627686f58..750ff0b294047cf83b2c28f72cfbd057375fc7b1 100644 (file)
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;
 
-                       irq.shorthand = APIC_DEST_NOSHORT;
                        irq.vector = e->fields.vector;
                        irq.delivery_mode = e->fields.delivery_mode << 8;
-                       irq.dest_id = e->fields.dest_id;
                        irq.dest_mode =
                            kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
+                       irq.level = false;
+                       irq.trig_mode = e->fields.trig_mode;
+                       irq.shorthand = APIC_DEST_NOSHORT;
+                       irq.dest_id = e->fields.dest_id;
+                       irq.msi_redir_hint = false;
                        bitmap_zero(&vcpu_bitmap, 16);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                 &vcpu_bitmap);
index 24c0b2ba8fb9d34e5d1b22cefe24d6b704cc9ba1..91000501756ec04214ca14f6083440d2a351d1a4 100644 (file)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6312,7 +6312,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
        enum exit_fastpath_completion *exit_fastpath)
 {
        if (!is_guest_mode(vcpu) &&
-               to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
+           to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+           to_svm(vcpu)->vmcb->control.exit_info_1)
                *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
index e920d7834d736ee379722abb445b5847a82a279e..9750e590c89d75e14c174e8ae64df2ccdfd25d61 100644 (file)
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
                return;
 
        kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
-       vmx->nested.hv_evmcs_vmptr = -1ull;
+       vmx->nested.hv_evmcs_vmptr = 0;
        vmx->nested.hv_evmcs = NULL;
 }
 
@@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
        if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
                return 1;
 
-       if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+       if (unlikely(!vmx->nested.hv_evmcs ||
+                    evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
                if (!vmx->nested.hv_evmcs)
                        vmx->nested.current_vmptr = -1ull;
 
index 40b1e6138cd5ce30e62790530131d3444cdd2618..26f8f31563e9b7b766d2fdbda135592085b3f148 100644 (file)
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2338,6 +2338,17 @@ static void hardware_disable(void)
        kvm_cpu_vmxoff();
 }
 
+/*
+ * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
+ * directly instead of going through cpu_has(), to ensure KVM is trapping
+ * ENCLS whenever it's supported in hardware.  It does not matter whether
+ * the host OS supports or has enabled SGX.
+ */
+static bool cpu_has_sgx(void)
+{
+       return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
+}
+
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
                                      u32 msr, u32 *result)
 {
@@ -2418,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
                        SECONDARY_EXEC_PT_USE_GPA |
                        SECONDARY_EXEC_PT_CONCEAL_VMX |
-                       SECONDARY_EXEC_ENABLE_VMFUNC |
-                       SECONDARY_EXEC_ENCLS_EXITING;
+                       SECONDARY_EXEC_ENABLE_VMFUNC;
+               if (cpu_has_sgx())
+                       opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
index 5de200663f51476b025f801ee6b3dbf64e8d3228..3156e25b077495c823594f84004491ffcc782551 100644 (file)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7195,10 +7195,12 @@ static void kvm_timer_init(void)
 
                cpu = get_cpu();
                policy = cpufreq_cpu_get(cpu);
-               if (policy && policy->cpuinfo.max_freq)
-                       max_tsc_khz = policy->cpuinfo.max_freq;
+               if (policy) {
+                       if (policy->cpuinfo.max_freq)
+                               max_tsc_khz = policy->cpuinfo.max_freq;
+                       cpufreq_cpu_put(policy);
+               }
                put_cpu();
-               cpufreq_cpu_put(policy);
 #endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
index 44e4beb4239f93bb83876bd21ab6f8007460be71..935a91e1fd7744a6af7e5e3bd1206ad4d197b067 100644 (file)
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
        return 0;
 }
 
+/*
+ * The EFI runtime services data area is not covered by walk_mem_res(), but must
+ * be mapped encrypted when SEV is active.
+ */
+static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
+{
+       if (!sev_active())
+               return;
+
+       if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+               desc->flags |= IORES_MAP_ENCRYPTED;
+}
+
 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
 {
        struct ioremap_desc *desc = arg;
@@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
  * To avoid multiple resource walks, this function walks resources marked as
  * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
  * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ *
+ * After that, deal with misc other ranges in __ioremap_check_other() which do
+ * not fall into the above category.
  */
 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
                                struct ioremap_desc *desc)
@@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
        memset(desc, 0, sizeof(struct ioremap_desc));
 
        walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
+
+       __ioremap_check_other(addr, desc);
 }
 
 /*
index 27ca68621137ad5418e647c859b39b12a588f9c2..9a599cc28c290c7d79ef6512c5173a4ee7605bc5 100644 (file)
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
                return false;
 
        /* is something in flight? */
-       if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
+       if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
                return false;
 
        return true;
index 856356b1619e83f05fc86fc37ed4a9086b413d86..74cedea560348167ab951c53dc3d8e14fe4f70dc 100644 (file)
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        WARN_ON(e && (rq->tag != -1));
 
        if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+               /*
+                * Firstly normal IO request is inserted to scheduler queue or
+                * sw queue, meantime we add flush request to dispatch queue(
+                * hctx->dispatch) directly and there is at most one in-flight
+                * flush request for each hw queue, so it doesn't matter to add
+                * flush request to tail or front of the dispatch queue.
+                *
+                * Secondly in case of NCQ, flush request belongs to non-NCQ
+                * command, and queueing it will fail when there is any
+                * in-flight normal IO request(NCQ command). When adding flush
+                * rq to the front of hctx->dispatch, it is easier to introduce
+                * extra time to flush rq's latency because of S_SCHED_RESTART
+                * compared with adding to the tail of dispatch queue, then
+                * chance of flush merge is increased, and less flush requests
+                * will be issued to controller. It is observed that ~10% time
+                * is saved in blktests block/004 on disk attached to AHCI/NCQ
+                * drive when adding flush rq to the front of hctx->dispatch.
+                *
+                * Simply queue flush rq to the front of hctx->dispatch so that
+                * intensive flush workloads can benefit in case of NCQ HW.
+                */
+               at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }
index ff6268970ddc069f84b3977069cca72ba10627c6..9c2e13ce0d19554a01eb72c03d5c0fcba7a67a5a 100644 (file)
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -301,6 +301,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 }
 EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
 
+/**
+ * disk_has_partitions
+ * @disk: gendisk of interest
+ *
+ * Walk through the partition table and check if valid partition exists.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * True if the gendisk has at least one valid non-zero size partition.
+ * Otherwise false.
+ */
+bool disk_has_partitions(struct gendisk *disk)
+{
+       struct disk_part_tbl *ptbl;
+       int i;
+       bool ret = false;
+
+       rcu_read_lock();
+       ptbl = rcu_dereference(disk->part_tbl);
+
+       /* Iterate partitions skipping the whole device at index 0 */
+       for (i = 1; i < ptbl->len; i++) {
+               if (rcu_dereference(ptbl->part[i])) {
+                       ret = true;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(disk_has_partitions);
+
 /*
  * Can be deleted altogether. Later.
  *
index 8db8c0fb5e2dac6851893e62d25b682177539a7f..7af74fb450a0d01c5aaf90e9ba1a2859041b6250 100644 (file)
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -91,7 +91,7 @@
 #ifdef GENERAL_DEBUG
 #define PRINTK(args...) printk(args)
 #else
-#define PRINTK(args...)
+#define PRINTK(args...) do {} while (0)
 #endif /* GENERAL_DEBUG */
 
 #ifdef EXTRA_DEBUG
index b8313a04422db69495d547cd10a8ef39c88510a1..48efa7a047f3ec0e4149d24a836151928c717793 100644 (file)
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -111,7 +111,7 @@ config CFAG12864B
          If unsure, say N.
 
 config CFAG12864B_RATE
-       int "Refresh rate (hertz)"
+       int "Refresh rate (hertz)"
        depends on CFAG12864B
        default "20"
        ---help---
@@ -329,7 +329,7 @@ config PANEL_LCD_PROTO
 
 config PANEL_LCD_PIN_E
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
        range -17 17
        default 14
        ---help---
@@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E
 
 config PANEL_LCD_PIN_RS
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
        range -17 17
        default 17
        ---help---
@@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS
 
 config PANEL_LCD_PIN_RW
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
        range -17 17
        default 16
        ---help---
@@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW
 
 config PANEL_LCD_PIN_SCL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
        range -17 17
        default 1
        ---help---
@@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL
 
 config PANEL_LCD_PIN_SDA
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
        range -17 17
        default 2
        ---help---
@@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA
 
 config PANEL_LCD_PIN_BL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
-        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
        range -17 17
        default 0
        ---help---
          This describes the number of the parallel port pin to which the LCD 'BL' signal
-          has been connected. It can be :
+         has been connected. It can be :
 
                  0 : no connection (eg: connected to ground)
              1..17 : directly connected to any of these pins on the DB25 plug
index 874c259a88291880d368440437444dcf7e7b625d..c0da3820454b2ca944041d167e5f40857f0caeb9 100644 (file)
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -88,7 +88,7 @@ struct charlcd_priv {
                int len;
        } esc_seq;
 
-       unsigned long long drvdata[0];
+       unsigned long long drvdata[];
 };
 
 #define charlcd_to_priv(p)     container_of(p, struct charlcd_priv, lcd)
index efb928e25aef35cac24fa7f4f240391277bf9e0b..1cce409ce5cacbc8a9a7ae271233c981b0ef2dcd 100644 (file)
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        const struct img_ascii_lcd_config *cfg;
        struct img_ascii_lcd_ctx *ctx;
-       struct resource *res;
        int err;
 
        match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
@@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
                                         &ctx->offset))
                        return -EINVAL;
        } else {
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               ctx->base = devm_ioremap_resource(&pdev->dev, res);
+               ctx->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(ctx->base))
                        return PTR_ERR(ctx->base);
        }
index 7fa654f1288b80ffa73e11c49f7d340695d5de9e..b5ce7b0857953e32c90743c39b920a9f61a89f2a 100644 (file)
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dma_mask)
-               pdev->dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dma_mask;
+       if (!pdev->dev.dma_mask) {
+               pdev->platform_dma_mask = DMA_BIT_MASK(32);
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
+       }
 };
 
 /**
@@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full(
        pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 
        if (pdevinfo->dma_mask) {
-               /*
-                * This memory isn't freed when the device is put,
-                * I don't have a nice idea for that though.  Conceptually
-                * dma_mask in struct device should not be a pointer.
-                * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
-                */
-               pdev->dev.dma_mask =
-                       kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-               if (!pdev->dev.dma_mask)
-                       goto err;
-
-               kmemleak_ignore(pdev->dev.dma_mask);
-
-               *pdev->dev.dma_mask = pdevinfo->dma_mask;
+               pdev->platform_dma_mask = pdevinfo->dma_mask;
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
        }
 
@@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full(
        if (ret) {
 err:
                ACPI_COMPANION_SET(&pdev->dev, NULL);
-               kfree(pdev->dev.dma_mask);
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }
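
The platform.c rework above stops kmalloc()ing a separate u64 for the DMA mask and instead keeps the storage in a field of struct platform_device, pointing dev.dma_mask at it, so there is nothing extra to free on the error or teardown paths. The general shape of that idea, sketched in plain C with invented names:

    #include <stdint.h>
    #include <stdio.h>

    struct child {
        uint64_t *mask;             /* points at storage owned elsewhere */
    };

    struct owner {
        struct child child;
        uint64_t storage_mask;      /* lives exactly as long as the owner */
    };

    static void owner_init(struct owner *o, uint64_t mask)
    {
        o->storage_mask = mask;
        o->child.mask = &o->storage_mask;   /* no separate allocation to free later */
    }

    int main(void)
    {
        struct owner o;

        owner_init(&o, (1ULL << 32) - 1);
        printf("mask = %#llx\n", (unsigned long long)*o.child.mask);
        return 0;
    }
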
index 54158766334b20cb7eeb0a5ead976843caa3df40..0736248999b0da7f81d81a1c9525483759444634 100644 (file)
@@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
-               blk_mq_stop_hw_queue(hctx);
+               /* Don't stop the queue if -ENOMEM: we may have failed to
+                * bounce the buffer due to global resource outage.
+                */
+               if (err == -ENOSPC)
+                       blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
-               /* Out of mem doesn't actually happen, since we fall back
-                * to direct descriptors */
-               if (err == -ENOMEM || err == -ENOSPC)
+               switch (err) {
+               case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
-               return BLK_STS_IOERR;
+               case -ENOMEM:
+                       return BLK_STS_RESOURCE;
+               default:
+                       return BLK_STS_IOERR;
+               }
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
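
The virtio_blk hunk above now tells a full virtqueue (-ENOSPC: stop the hardware queue and report a device resource condition) apart from a host-side allocation failure (-ENOMEM: report a plain resource condition so the block layer can retry without the queue being stopped). A standalone sketch of that mapping, using a stand-in enum since the real blk_status_t values belong to the block layer:

    #include <errno.h>
    #include <stdio.h>

    enum blk_status { STS_OK, STS_DEV_RESOURCE, STS_RESOURCE, STS_IOERR };

    /* -ENOSPC: the device-side ring is full; -ENOMEM: transient host failure */
    static enum blk_status map_queue_error(int err, int *stop_queue)
    {
        *stop_queue = (err == -ENOSPC);
        switch (err) {
        case -ENOSPC:
            return STS_DEV_RESOURCE;
        case -ENOMEM:
            return STS_RESOURCE;
        default:
            return STS_IOERR;
        }
    }

    int main(void)
    {
        int stop;

        printf("ENOSPC -> %d, stop=%d\n", map_queue_error(-ENOSPC, &stop), stop);
        printf("ENOMEM -> %d, stop=%d\n", map_queue_error(-ENOMEM, &stop), stop);
        return 0;
    }
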
index c78127ccbc0dd019b34ec784f439179c3d91d466..638c693e17adab63bc46a5b6979902865c34e8a2 100644 (file)
@@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
        else
                io.slave_addr = slave_addr;
 
-       io.irq = platform_get_irq(pdev, 0);
+       io.irq = platform_get_irq_optional(pdev, 0);
        if (io.irq > 0)
                io.irq_setup = ipmi_std_irq_setup;
        else
@@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
                io.irq = tmp;
                io.irq_setup = acpi_gpe_irq_setup;
        } else {
-               int irq = platform_get_irq(pdev, 0);
+               int irq = platform_get_irq_optional(pdev, 0);
 
                if (irq > 0) {
                        io.irq = irq;
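
platform_get_irq() logs an error when no interrupt is described, which is noisy for hardware where the IRQ really is optional; platform_get_irq_optional() returns the same values without the message. A short sketch of the fallback pattern used above (illustrative driver code, not part of the patch):

    #include <linux/platform_device.h>

    static void example_setup_irq(struct platform_device *pdev)
    {
        int irq = platform_get_irq_optional(pdev, 0);

        if (irq > 0) {
            /* an interrupt is described: request it, run IRQ-driven */
        } else {
            /* no IRQ (or a deferral-style error): fall back to polling */
        }
    }
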
index f0f2b599fd7e90a5faacc1499f28aa058ef79d0d..95adf6c6db3db641d5be25d8573abd4232f309d4 100644 (file)
@@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name);
  *
  * Returns: The number of clocks that are possible parents of this node
  */
-unsigned int of_clk_get_parent_count(struct device_node *np)
+unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        int count;
 
@@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
 
-const char *of_clk_get_parent_name(struct device_node *np, int index)
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
 {
        struct of_phandle_args clkspec;
        struct property *prop;
index dd7af41e47eb96b7180410927852782e6873b850..0a5d395bce93584f85bca6a0fb390147eda26aee 100644 (file)
@@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
        },
 };
 
-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
-       .halt_reg = 0x400c,
-       .halt_check = BRANCH_HALT,
-       .clkr = {
-               .enable_reg = 0x400c,
-               .enable_mask = BIT(0),
-               .hw.init = &(struct clk_init_data){
-                       .name = "disp_cc_mdss_rscc_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
-                       },
-                       .num_parents = 1,
-                       .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
        .halt_reg = 0x4008,
        .halt_check = BRANCH_HALT,
@@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = {
        [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
        [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
        [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
-       [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
        [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
index c363c3cc544e2cd796d3678ad364b069b5368c81..276e5ecd48403f41914792981dd3c015e7a4c9fa 100644 (file)
@@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = {
 
 static struct clk_branch video_cc_vcodec0_core_clk = {
        .halt_reg = 0x890,
-       .halt_check = BRANCH_HALT,
+       .halt_check = BRANCH_HALT_VOTED,
        .clkr = {
                .enable_reg = 0x890,
                .enable_mask = BIT(0),
index 7576450c8254b8cc8c1cada05a1c9ac61922ea1f..aff3dfb4d7ba643219254820282a4f4d903edab7 100644 (file)
@@ -83,13 +83,16 @@ static ssize_t
 efivar_attr_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -116,13 +119,16 @@ static ssize_t
 efivar_size_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -133,12 +139,15 @@ static ssize_t
 efivar_data_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        memcpy(buf, var->Data, var->DataSize);
@@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        u8 *data;
        int err;
 
+       if (!entry || !buf)
+               return -EINVAL;
+
        if (in_compat_syscall()) {
                struct compat_efi_variable *compat;
 
@@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
        struct compat_efi_variable *compat;
+       unsigned long datasize = sizeof(var->Data);
        size_t size;
+       int ret;
 
        if (!entry || !buf)
                return 0;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &entry->var.Attributes,
-                            &entry->var.DataSize, entry->var.Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+       var->DataSize = datasize;
+       if (ret)
                return -EIO;
 
        if (in_compat_syscall()) {
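
Each efivars hunk above stops passing &var->DataSize straight into efivar_entry_get() with a hard-coded 1024; instead a local unsigned long is initialised from sizeof(var->Data), handed to the getter, and copied back into DataSize only afterwards. The shape of that idiom in plain C, with a fake reader standing in for the firmware call:

    #include <stdio.h>
    #include <string.h>

    struct variable {
        unsigned long data_size;
        unsigned char data[1024];
    };

    /* stand-in for a call that writes up to *size bytes and updates *size */
    static int fake_get(unsigned char *buf, unsigned long *size)
    {
        static const char payload[] = "hello";

        if (*size < sizeof(payload))
            return -1;
        memcpy(buf, payload, sizeof(payload));
        *size = sizeof(payload);
        return 0;
    }

    static int read_variable(struct variable *var)
    {
        unsigned long size = sizeof(var->data);   /* bound by the buffer, not a magic number */
        int ret = fake_get(var->data, &size);

        var->data_size = size;                    /* publish the size the callee reported */
        return ret;
    }

    int main(void)
    {
        struct variable v;

        if (!read_variable(&v))
            printf("read %lu bytes\n", v.data_size);
        return 0;
    }
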
index ea5cd1e1730494e60bca57643f65dc23bcfb7f00..e7933930a65770c3d63d9d72e90b86798f25a8b0 100644 (file)
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
        return komeda_dev_suspend(mdrv->mdev);
 }
 
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
index b615b7dfdd9dab92d7149f5af7b671f774d9277e..a4fc4e6aee3927ce642b821b0dc3cf2e2c69eb92 100644 (file)
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
                size = min(size, mem);
        }
 
-       if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-               DRM_ERROR("Cannot request framebuffer\n");
-               return -EBUSY;
-       }
+       if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+               DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
        bochs->fb_map = ioremap(addr, size);
        if (bochs->fb_map == NULL) {
index 67fca439bbfb476f442262bd1094e7f59a27aad2..24965e53d351bf711906c5aa2950e612883cd61e 100644 (file)
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                frame.colorspace = HDMI_COLORSPACE_RGB;
 
        /* Set up colorimetry */
-       switch (hdmi->hdmi_data.enc_out_encoding) {
-       case V4L2_YCBCR_ENC_601:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
+       if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+               switch (hdmi->hdmi_data.enc_out_encoding) {
+               case V4L2_YCBCR_ENC_601:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               case V4L2_YCBCR_ENC_709:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+                       break;
+               default: /* Carries no data */
                        frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               }
+       } else {
+               frame.colorimetry = HDMI_COLORIMETRY_NONE;
                frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
-       case V4L2_YCBCR_ENC_709:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
-                       frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-               break;
-       default: /* Carries no data */
-               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
+                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
        }
 
        frame.scan_mode = HDMI_SCAN_MODE_NONE;
index b481cafdde280bbaddf0e9168c448fafba95d095..825abe38201acfe6aecd742f89b2b5b3e9165df0 100644 (file)
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        }
 
        DRM_DEBUG_LEASE("Creating lease\n");
+       /* lessee will take the ownership of leases */
        lessee = drm_lease_create(lessor, &leases);
 
        if (IS_ERR(lessee)) {
                ret = PTR_ERR(lessee);
+               idr_destroy(&leases);
                goto out_leases;
        }
 
@@ -580,7 +582,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
 out_leases:
        put_unused_fd(fd);
-       idr_destroy(&leases);
 
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
        return ret;
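
The drm_lease fix above moves idr_destroy() off the shared error path: once drm_lease_create() succeeds the lessee owns the idr contents, so the caller may only tear it down on the branch where creation failed. The same ownership rule in a minimal plain-C form (names invented):

    #include <stdlib.h>

    struct consumer {
        char *buf;
    };

    /* takes ownership of buf on success; leaves it with the caller on failure */
    static struct consumer *consumer_create(char *buf, int fail)
    {
        struct consumer *c;

        if (fail)
            return NULL;
        c = malloc(sizeof(*c));
        if (!c)
            return NULL;
        c->buf = buf;
        return c;
    }

    int main(int argc, char **argv)
    {
        char *buf = malloc(64);
        struct consumer *c = consumer_create(buf, argc > 1);

        (void)argv;
        if (!c) {
            free(buf);          /* creation failed: we still own buf */
            return 1;
        }
        /* success: c owns buf now; freeing it here as well would double free */
        free(c->buf);
        free(c);
        return 0;
    }
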
index 050adda7c1bdf5d1ea511f0d2ada1e890a5919c7..05b35ac33ce33b4063b6993c6054b54be78aea88 100644 (file)
@@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
        pm_runtime_get_noresume(&pdev->dev);
 
        i2c_del_adapter(&dev->adapter);
+       devm_free_irq(&pdev->dev, dev->irq, dev);
        pci_free_irq_vectors(pdev);
 }
 
index 3a9e840a3546673b33dd7f4b9d1d3613632721da..a4a6825c87583f4d8b3d51800e753098597e3f1b 100644 (file)
@@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
        if (ret == -ENOENT)
                retdesc = ERR_PTR(-EPROBE_DEFER);
 
-       if (ret != -EPROBE_DEFER)
+       if (PTR_ERR(retdesc) != -EPROBE_DEFER)
                dev_err(dev, "error trying to get descriptor: %d\n", ret);
 
        return retdesc;
index ca4f096fef74930254e772da8bd54dfbc2454716..a9c03f5c34825a95901ca420bc2164e74c0651f1 100644 (file)
 #define TCOBASE                0x050
 #define TCOCTL         0x054
 
-#define ACPIBASE               0x040
-#define ACPIBASE_SMI_OFF       0x030
-#define ACPICTRL               0x044
-#define ACPICTRL_EN            0x080
-
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
 #define SBREG_SMBCTRL_DNV      0xcf000c
@@ -1553,7 +1548,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
                pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
        spin_unlock(&p2sb_spinlock);
 
-       res = &tco_res[ICH_RES_MEM_OFF];
+       res = &tco_res[1];
        if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
                res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
        else
@@ -1563,7 +1558,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
        res->flags = IORESOURCE_MEM;
 
        return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 3, &spt_tco_platform_data,
+                                       tco_res, 2, &spt_tco_platform_data,
                                        sizeof(spt_tco_platform_data));
 }
 
@@ -1576,17 +1571,16 @@ static struct platform_device *
 i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
                 struct resource *tco_res)
 {
-       return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 2, &cnl_tco_platform_data,
-                                       sizeof(cnl_tco_platform_data));
+       return platform_device_register_resndata(&pci_dev->dev,
+                       "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
+                       sizeof(cnl_tco_platform_data));
 }
 
 static void i801_add_tco(struct i801_priv *priv)
 {
-       u32 base_addr, tco_base, tco_ctl, ctrl_val;
        struct pci_dev *pci_dev = priv->pci_dev;
-       struct resource tco_res[3], *res;
-       unsigned int devfn;
+       struct resource tco_res[2], *res;
+       u32 tco_base, tco_ctl;
 
        /* If we have ACPI based watchdog use that instead */
        if (acpi_has_watchdog())
@@ -1601,30 +1595,15 @@ static void i801_add_tco(struct i801_priv *priv)
                return;
 
        memset(tco_res, 0, sizeof(tco_res));
-
-       res = &tco_res[ICH_RES_IO_TCO];
-       res->start = tco_base & ~1;
-       res->end = res->start + 32 - 1;
-       res->flags = IORESOURCE_IO;
-
        /*
-        * Power Management registers.
+        * Always populate the main iTCO IO resource here. The second entry
+        * for NO_REBOOT MMIO is filled by the SPT specific function.
         */
-       devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
-
-       res = &tco_res[ICH_RES_IO_SMI];
-       res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
-       res->end = res->start + 3;
+       res = &tco_res[0];
+       res->start = tco_base & ~1;
+       res->end = res->start + 32 - 1;
        res->flags = IORESOURCE_IO;
 
-       /*
-        * Enable the ACPI I/O space.
-        */
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
-       ctrl_val |= ACPICTRL_EN;
-       pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
-
        if (priv->features & FEATURE_TCO_CNL)
                priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
        else
index 8f3dbc97a0571e3de740677e680ec25ef4348775..8b0ff780919b1d0436f1f93c6567e36561803d02 100644 (file)
@@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
+       struct i2c_client *client;
 
        dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
-       return dev ? i2c_verify_client(dev) : NULL;
+       if (!dev)
+               return NULL;
+
+       client = i2c_verify_client(dev);
+       if (!client)
+               put_device(dev);
+
+       return client;
 }
 
 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
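
bus_find_device_by_acpi_dev() hands back the device with a reference held; the fix above drops that reference with put_device() when the device turns out not to be an I2C client, instead of leaking it. The same get/put discipline with another of the bus_find_device_*() helpers, as a hedged kernel-style sketch (device name invented):

    #include <linux/device.h>
    #include <linux/platform_device.h>

    static void example_lookup(void)
    {
        struct device *dev;

        /* the lookup takes a reference on the device it returns */
        dev = bus_find_device_by_name(&platform_bus_type, NULL, "example-device");
        if (!dev)
            return;

        /* ... inspect or use dev ... */

        put_device(dev);        /* balance the reference taken by the lookup */
    }
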
index aac132bd1ef00101c385441ceca43115176d8bdd..20cce366e951813908aa2e7cb3cc8bcd474cf6c3 100644 (file)
@@ -3826,7 +3826,7 @@ int amd_iommu_activate_guest_mode(void *data)
        entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
 
@@ -3852,7 +3852,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
                                APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
 
index a2e96a5fd9a7b3a6d9e2bf35ea1a8d7bdc78f1f2..ba128d1cdaeeb15a5fcfae09e8c41544b2990883 100644 (file)
@@ -177,15 +177,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 
-       msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
-       if (!msi_page)
-               return -ENOMEM;
-
        for (i = 0; i < num_pages; i++) {
-               msi_page[i].phys = start;
-               msi_page[i].iova = start;
-               INIT_LIST_HEAD(&msi_page[i].list);
-               list_add(&msi_page[i].list, &cookie->msi_page_list);
+               msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
+               if (!msi_page)
+                       return -ENOMEM;
+
+               msi_page->phys = start;
+               msi_page->iova = start;
+               INIT_LIST_HEAD(&msi_page->list);
+               list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }
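
The dma-iommu hunk above trades one kcalloc()ed array of MSI page descriptors for one kmalloc() per descriptor, so every list entry is an independently allocated object that can be freed on its own by the normal list teardown. A plain-C sketch of building a list one node at a time with cleanup on partial failure (node type invented):

    #include <stdlib.h>

    struct node {
        unsigned long value;
        struct node *next;
    };

    static void free_list(struct node *head)
    {
        while (head) {
            struct node *next = head->next;

            free(head);
            head = next;
        }
    }

    /* one allocation per node; a partially built list can still be torn
     * down by the ordinary per-node free path */
    static struct node *build_list(unsigned int n)
    {
        struct node *head = NULL;

        while (n--) {
            struct node *e = malloc(sizeof(*e));

            if (!e) {
                free_list(head);
                return NULL;
            }
            e->value = n;
            e->next = head;
            head = e;
        }
        return head;
    }

    int main(void)
    {
        free_list(build_list(8));
        return 0;
    }
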
 
index 071bb42bbbc5bef6eba8584ec50ce24eeea06993..f77dae7ba7d4089f9fbf441b70a3e121f1dd7deb 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/numa.h>
+#include <linux/limits.h>
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
@@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 
        BUG_ON(dev->is_virtfn);
 
+       /*
+        * Ignore devices that have a domain number higher than what can
+        * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
+        */
+       if (pci_domain_nr(dev->bus) > U16_MAX)
+               return NULL;
+
        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
@@ -363,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
 {
        struct dmar_drhd_unit *dmaru;
 
-       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
+                               dmar_rcu_check())
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;
@@ -440,12 +449,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
 
        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+               pr_warn(FW_BUG
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
@@ -471,14 +481,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
                        return 0;
                }
        }
-       WARN_TAINT(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn(FW_BUG
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-               drhd->reg_base_addr,
+               rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 
        return 0;
 }
@@ -827,14 +837,14 @@ int __init dmar_table_init(void)
 
 static void warn_invalid_dmar(u64 addr, const char *message)
 {
-       WARN_TAINT_ONCE(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn_once(FW_BUG
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 }
 
 static int __ref
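
The dmar.c hunks above replace WARN_TAINT()/WARN_TAINT_ONCE(), which dump a backtrace, with a plain pr_warn() carrying the FW_BUG prefix followed by an explicit add_taint(). A hedged sketch of that reporting pattern (the message text is illustrative):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void report_firmware_bug(void)
    {
        /* complain without a stack dump, then record the taint */
        pr_warn(FW_BUG "example: firmware table is inconsistent\n");
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
    }
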
index c1257bef553cef77881a54dd0372babe03b1544b..3eb1fe240fb00981471c3bc2b40a87d9ca81e4bc 100644 (file)
@@ -33,38 +33,42 @@ struct iommu_regset {
 
 #define IOMMU_REGSET_ENTRY(_reg_)                                      \
        { DMAR_##_reg_##_REG, __stringify(_reg_) }
-static const struct iommu_regset iommu_regs[] = {
+
+static const struct iommu_regset iommu_regs_32[] = {
        IOMMU_REGSET_ENTRY(VER),
-       IOMMU_REGSET_ENTRY(CAP),
-       IOMMU_REGSET_ENTRY(ECAP),
        IOMMU_REGSET_ENTRY(GCMD),
        IOMMU_REGSET_ENTRY(GSTS),
-       IOMMU_REGSET_ENTRY(RTADDR),
-       IOMMU_REGSET_ENTRY(CCMD),
        IOMMU_REGSET_ENTRY(FSTS),
        IOMMU_REGSET_ENTRY(FECTL),
        IOMMU_REGSET_ENTRY(FEDATA),
        IOMMU_REGSET_ENTRY(FEADDR),
        IOMMU_REGSET_ENTRY(FEUADDR),
-       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PMEN),
        IOMMU_REGSET_ENTRY(PLMBASE),
        IOMMU_REGSET_ENTRY(PLMLIMIT),
+       IOMMU_REGSET_ENTRY(ICS),
+       IOMMU_REGSET_ENTRY(PRS),
+       IOMMU_REGSET_ENTRY(PECTL),
+       IOMMU_REGSET_ENTRY(PEDATA),
+       IOMMU_REGSET_ENTRY(PEADDR),
+       IOMMU_REGSET_ENTRY(PEUADDR),
+};
+
+static const struct iommu_regset iommu_regs_64[] = {
+       IOMMU_REGSET_ENTRY(CAP),
+       IOMMU_REGSET_ENTRY(ECAP),
+       IOMMU_REGSET_ENTRY(RTADDR),
+       IOMMU_REGSET_ENTRY(CCMD),
+       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PHMBASE),
        IOMMU_REGSET_ENTRY(PHMLIMIT),
        IOMMU_REGSET_ENTRY(IQH),
        IOMMU_REGSET_ENTRY(IQT),
        IOMMU_REGSET_ENTRY(IQA),
-       IOMMU_REGSET_ENTRY(ICS),
        IOMMU_REGSET_ENTRY(IRTA),
        IOMMU_REGSET_ENTRY(PQH),
        IOMMU_REGSET_ENTRY(PQT),
        IOMMU_REGSET_ENTRY(PQA),
-       IOMMU_REGSET_ENTRY(PRS),
-       IOMMU_REGSET_ENTRY(PECTL),
-       IOMMU_REGSET_ENTRY(PEDATA),
-       IOMMU_REGSET_ENTRY(PEADDR),
-       IOMMU_REGSET_ENTRY(PEUADDR),
        IOMMU_REGSET_ENTRY(MTRRCAP),
        IOMMU_REGSET_ENTRY(MTRRDEF),
        IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
@@ -127,10 +131,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
                 * by adding the offset to the pointer (virtual address).
                 */
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
-               for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
-                       value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
+                       value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
+                       seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+                                  iommu_regs_32[i].regs, iommu_regs_32[i].offset,
+                                  value);
+               }
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
+                       value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
                        seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
-                                  iommu_regs[i].regs, iommu_regs[i].offset,
+                                  iommu_regs_64[i].regs, iommu_regs_64[i].offset,
                                   value);
                }
                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
@@ -272,9 +282,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (!(sts & DMA_GSTS_TES)) {
+                       seq_printf(m, "DMA Remapping is not enabled on %s\n",
+                                  iommu->name);
+                       continue;
+               }
                root_tbl_walk(m, iommu);
                seq_putc(m, '\n');
        }
@@ -415,6 +432,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        u64 irta;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
@@ -424,7 +442,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
                seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
                           iommu->name);
 
-               if (iommu->ir_table) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
                        irta = virt_to_phys(iommu->ir_table->base);
                        seq_printf(m, " IR table address:%llx\n", irta);
                        ir_tbl_remap_entry_show(m, iommu);
index 6fa6de2b6ad586d933945f74de0bc79ef8691664..4be549478691884f75e148ed5e1bd23b84a23ac9 100644 (file)
@@ -4261,10 +4261,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
 
        /* we know that the this iommu should be at offset 0xa000 from vtbar */
        drhd = dmar_find_matched_drhd_unit(pdev);
-       if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
-                           TAINT_FIRMWARE_WORKAROUND,
-                           "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+       if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+               pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+       }
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
 
@@ -4460,14 +4461,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
        struct dmar_rmrr_unit *rmrru;
 
        rmrr = (struct acpi_dmar_reserved_memory *)header;
-       if (rmrr_sanity_check(rmrr))
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+       if (rmrr_sanity_check(rmrr)) {
+               pr_warn(FW_BUG
                           "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           rmrr->base_address, rmrr->end_address,
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+       }
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -5130,6 +5133,9 @@ int __init intel_iommu_init(void)
 
        down_write(&dmar_global_lock);
 
+       if (!no_iommu)
+               intel_iommu_debugfs_init();
+
        if (no_iommu || dmar_disabled) {
                /*
                 * We exit the function here to ensure IOMMU's remapping and
@@ -5193,6 +5199,7 @@ int __init intel_iommu_init(void)
 
        init_iommu_pm_ops();
 
+       down_read(&dmar_global_lock);
        for_each_active_iommu(iommu, drhd) {
                iommu_device_sysfs_add(&iommu->iommu, NULL,
                                       intel_iommu_groups,
@@ -5200,6 +5207,7 @@ int __init intel_iommu_init(void)
                iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
                iommu_device_register(&iommu->iommu);
        }
+       up_read(&dmar_global_lock);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        if (si_domain && !hw_pass_through)
@@ -5210,7 +5218,6 @@ int __init intel_iommu_init(void)
        down_read(&dmar_global_lock);
        if (probe_acpi_namespace_devices())
                pr_warn("ACPI name space devices didn't probe correctly\n");
-       up_read(&dmar_global_lock);
 
        /* Finally, we enable the DMA remapping hardware. */
        for_each_iommu(iommu, drhd) {
@@ -5219,10 +5226,11 @@ int __init intel_iommu_init(void)
 
                iommu_disable_protect_mem_regions(iommu);
        }
+       up_read(&dmar_global_lock);
+
        pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
        intel_iommu_enabled = 1;
-       intel_iommu_debugfs_init();
 
        return 0;
 
@@ -5700,8 +5708,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        u64 phys = 0;
 
        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
-       if (pte)
-               phys = dma_pte_addr(pte);
+       if (pte && dma_pte_present(pte))
+               phys = dma_pte_addr(pte) +
+                       (iova & (BIT_MASK(level_to_offset_bits(level) +
+                                               VTD_PAGE_SHIFT) - 1));
 
        return phys;
 }
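
The last intel-iommu.c hunk makes iova_to_phys return the address of the exact byte rather than the base of the (possibly large) page: the offset of the IOVA inside the page is added, and the page size depends on the level at which the PTE was found. The arithmetic in a standalone form, with the usual 4KiB/2MiB/1GiB shifts used purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* combine a page base with the in-page offset of iova for a mapping
     * whose page size is 1 << page_shift */
    static uint64_t iova_to_phys(uint64_t page_base, uint64_t iova, unsigned int page_shift)
    {
        uint64_t offset_mask = (1ULL << page_shift) - 1;

        return (page_base & ~offset_mask) | (iova & offset_mask);
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)iova_to_phys(0x80000000ULL, 0x1234ULL, 12));
        printf("%#llx\n", (unsigned long long)iova_to_phys(0x80000000ULL, 0x123456ULL, 21));
        printf("%#llx\n", (unsigned long long)iova_to_phys(0x80000000ULL, 0x12345678ULL, 30));
        return 0;
    }
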
index 983b08477e645902a6e477fa9adc3226b9143e13..04fbd4bf0ff9fd2140ff01a0063352eef047fb7f 100644 (file)
@@ -468,7 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -645,7 +645,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return 0;
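
Both io-pgtable-arm hunks ask whether the IOVA has bits set above cfg->ias by arithmetic-shifting it right by ias. Casting to s64 instead of long keeps the shift on a 64-bit quantity even on 32-bit builds, where long is 32 bits wide and the shift count can reach or exceed that width. A standalone sketch of the simple (non-TTBR1) case of the check, with an explicit 64-bit type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true if iova has bits set at or above bit position ias */
    static bool iova_exceeds_ias(uint64_t iova, unsigned int ias)
    {
        int64_t iaext = (int64_t)iova >> ias;   /* 64-bit shift regardless of sizeof(long) */

        return iaext != 0;
    }

    int main(void)
    {
        printf("%d\n", iova_exceeds_ias(1ULL << 40, 39));  /* 1: out of range */
        printf("%d\n", iova_exceeds_ias(1ULL << 38, 39));  /* 0: in range */
        return 0;
    }
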
index c1f7af9d9ae719a8b01cf7c84760ce6cd9781070..1eec9d4649d51ac5ae835cc8ae75377c6ca5db04 100644 (file)
@@ -34,6 +34,7 @@
 #define GICD_INT_NMI_PRI       (GICD_INT_DEF_PRI & ~0x80)
 
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996    (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539  (1ULL << 1)
 
 struct redist_region {
        void __iomem            *redist_base;
@@ -1464,6 +1465,15 @@ static bool gic_enable_quirk_msm8996(void *data)
        return true;
 }
 
+static bool gic_enable_quirk_cavium_38539(void *data)
+{
+       struct gic_chip_data *d = data;
+
+       d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
+
+       return true;
+}
+
 static bool gic_enable_quirk_hip06_07(void *data)
 {
        struct gic_chip_data *d = data;
@@ -1502,6 +1512,19 @@ static const struct gic_quirk gic_quirks[] = {
                .mask   = 0xffffffff,
                .init   = gic_enable_quirk_hip06_07,
        },
+       {
+               /*
+                * Reserved register accesses generate a Synchronous
+                * External Abort. This erratum applies to:
+                * - ThunderX: CN88xx
+                * - OCTEON TX: CN83xx, CN81xx
+                * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
+                */
+               .desc   = "GICv3: Cavium erratum 38539",
+               .iidr   = 0xa000034c,
+               .mask   = 0xe8f00fff,
+               .init   = gic_enable_quirk_cavium_38539,
+       },
        {
        }
 };
@@ -1577,7 +1600,12 @@ static int __init gic_init_bases(void __iomem *dist_base,
        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
 
-       gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+       /*
+        * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
+        * architecture spec (which says that reserved registers are RES0).
+        */
+       if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
+               gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
 
        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
index 125605987b443b6f123070de71d4b72679f298c3..e7dec328c7cfdffeede1bf04b701ac9bca899301 100644 (file)
@@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
 
+static const struct of_device_id wf_ad7417_of_id[] = {
+       { .compatible = "ad7417", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
+
 static struct i2c_driver wf_ad7417_driver = {
        .driver = {
                .name   = "wf_ad7417",
+               .of_match_table = wf_ad7417_of_id,
        },
        .probe          = wf_ad7417_probe,
        .remove         = wf_ad7417_remove,
index 67daeec94b44a06eca49905c7ed86c979c779e52..2470e5a725c812f5342cdf40fd88aab61a5ca414 100644 (file)
@@ -580,9 +580,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
 
+static const struct of_device_id wf_fcu_of_id[] = {
+       { .compatible = "fcu", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
+
 static struct i2c_driver wf_fcu_driver = {
        .driver = {
                .name   = "wf_fcu",
+               .of_match_table = wf_fcu_of_id,
        },
        .probe          = wf_fcu_probe,
        .remove         = wf_fcu_remove,
index 282c28a17ea1ae936dd7b9bdafd61f344fa3ef7c..1e5fa09845e77be5efb2f06ca8836fb7214cf032 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
+#include <linux/of_device.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {      
        struct wf_lm75_sensor *lm;
-       int rc, ds1775 = id->driver_data;
+       int rc, ds1775;
        const char *name, *loc;
 
+       if (id)
+               ds1775 = id->driver_data;
+       else
+               ds1775 = !!of_device_get_match_data(&client->dev);
+
        DBG("wf_lm75: creating  %s device at address 0x%02x\n",
            ds1775 ? "ds1775" : "lm75", client->addr);
 
@@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
 
+static const struct of_device_id wf_lm75_of_id[] = {
+       { .compatible = "lm75", .data = (void *)0},
+       { .compatible = "ds1775", .data = (void *)1 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
+
 static struct i2c_driver wf_lm75_driver = {
        .driver = {
                .name   = "wf_lm75",
+               .of_match_table = wf_lm75_of_id,
        },
        .probe          = wf_lm75_probe,
        .remove         = wf_lm75_remove,
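
The windfarm hunks in this series add OF match tables so the drivers bind by device-tree compatible, and the lm75/ds1775 probe above learns to take its chip variant from of_device_get_match_data() when it is entered without an i2c_device_id. A hedged sketch of that dual-path probe (driver and compatible names invented):

    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <linux/of_device.h>

    static const struct of_device_id example_of_id[] = {
        { .compatible = "vendor,chip-a", .data = (void *)0 },
        { .compatible = "vendor,chip-b", .data = (void *)1 },
        { }
    };
    MODULE_DEVICE_TABLE(of, example_of_id);

    static int example_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
    {
        unsigned long variant;

        /* legacy board-info binding passes an id; the OF path does not */
        if (id)
            variant = id->driver_data;
        else
            variant = (unsigned long)of_device_get_match_data(&client->dev);

        dev_info(&client->dev, "probing chip variant %lu\n", variant);
        return 0;
    }
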
index b03a33b803b793c110a911c11610d8b98c0e4880..d011899c0a8a1fb14f51b211407a75963bcd1d68 100644 (file)
@@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
 
+static const struct of_device_id wf_lm87_of_id[] = {
+       { .compatible = "lm87cimt", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
+
 static struct i2c_driver wf_lm87_driver = {
        .driver = {
                .name   = "wf_lm87",
+               .of_match_table = wf_lm87_of_id,
        },
        .probe          = wf_lm87_probe,
        .remove         = wf_lm87_remove,
index e666cc02068387723c86f237cf60939ba8ac71f7..1e7b03d44ad975413a8086f12a5ff0b95d339d90 100644 (file)
@@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
 
+static const struct of_device_id wf_max6690_of_id[] = {
+       { .compatible = "max6690", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
+
 static struct i2c_driver wf_max6690_driver = {
        .driver = {
                .name           = "wf_max6690",
+               .of_match_table = wf_max6690_of_id,
        },
        .probe          = wf_max6690_probe,
        .remove         = wf_max6690_remove,
index c84ec49c37415c90366de89b70633e079f5e30ac..cb75dc03561670c2864ca4054355f5db1e0043ec 100644 (file)
@@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_sat_id);
 
+static const struct of_device_id wf_sat_of_id[] = {
+       { .compatible = "smu-sat", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_sat_of_id);
+
 static struct i2c_driver wf_sat_driver = {
        .driver = {
                .name           = "wf_smu_sat",
+               .of_match_table = wf_sat_of_id,
        },
        .probe          = wf_sat_probe,
        .remove         = wf_sat_remove,
index 031eb64549af51018eefda562f43a67a0d07e85d..282c9ef68ed22dfe73c58f11916fff9c44582228 100644 (file)
@@ -712,13 +712,14 @@ static int at24_probe(struct i2c_client *client)
         * chip is functional.
         */
        err = at24_read(at24, 0, &test_byte, 1);
-       pm_runtime_idle(dev);
        if (err) {
                pm_runtime_disable(dev);
                regulator_disable(at24->vcc_reg);
                return -ENODEV;
        }
 
+       pm_runtime_idle(dev);
+
        if (writable)
                dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
                         byte_len, client->name, at24->write_max);
index aa54d359dab74beb7ea27850b5ac144511df3fdc..a971c4bcc442b11ab76c5425bbab46c89bf32dfd 100644 (file)
@@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
         * the erase operation does not exceed the max_busy_timeout, we should
         * use R1B response. Or we need to prevent the host from doing hw busy
         * detection, which is done by converting to a R1 response instead.
+        * Note, some hosts requires R1B, which also means they are on their own
+        * when it comes to deal with the busy timeout.
         */
-       if (card->host->max_busy_timeout &&
+       if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
+           card->host->max_busy_timeout &&
            busy_timeout > card->host->max_busy_timeout) {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        } else {
index f6912ded652dcd3e60081e0f9504fd8ed105d163..de14b5845f525e0e8279465695eae885f7bd3ff9 100644 (file)
@@ -1910,9 +1910,12 @@ static int mmc_sleep(struct mmc_host *host)
         * If the max_busy_timeout of the host is specified, validate it against
         * the sleep cmd timeout. A failure means we need to prevent the host
         * from doing hw busy detection, which is done by converting to a R1
-        * response instead of a R1B.
+        * response instead of a R1B. Note, some hosts requires R1B, which also
+        * means they are on their own when it comes to deal with the busy
+        * timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout)) {
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
index da425ee2d9bf50da3cee4962dcdb5b6fe341ab6e..e025604e17d45aeafdb5e30488c3380bb3f2dc63 100644 (file)
@@ -542,9 +542,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
         * If the max_busy_timeout of the host is specified, make sure it's
         * enough to fit the used timeout_ms. In case it's not, let's instruct
         * the host to avoid HW busy detection, by converting to a R1 response
-        * instead of a R1B.
+        * instead of a R1B. Note, some hosts requires R1B, which also means
+        * they are on their own when it comes to deal with the busy timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;
 
        cmd.opcode = MMC_SWITCH;
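
The three mmc core hunks (core.c, mmc.c and __mmc_switch() above) all gate the "downgrade R1B to R1 when the busy period exceeds the host's max_busy_timeout" logic on the host not advertising MMC_CAP_NEED_RSP_BUSY; the sdhci-omap and sdhci-tegra hunks later in this set are where that capability gets set. The decision, factored into a small helper purely for illustration (the helper is hypothetical, not part of the patch):

    #include <linux/mmc/host.h>

    /* should this command keep its R1B (busy-signalling) response? */
    static bool example_use_r1b(struct mmc_host *host, unsigned int timeout_ms)
    {
        /* hosts that insist on R1B deal with long busy periods themselves */
        if (host->caps & MMC_CAP_NEED_RSP_BUSY)
            return true;

        /* otherwise fall back to R1 if the host cannot wait that long */
        return !host->max_busy_timeout || timeout_ms <= host->max_busy_timeout;
    }
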
index c3a160c1804772963840deab9b593caa4c1640e0..3955fa5db43c69f60b50a8abbcfcb90e43313f7e 100644 (file)
@@ -1590,7 +1590,7 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
        return 0;
 }
 
-void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
index 882053151a4741098684898e70bf5f1e03ea45ad..c4978177ef88c2b03e68dc40010adca958ade70d 100644 (file)
@@ -1192,6 +1192,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        if (of_find_property(dev->of_node, "dmas", NULL))
                sdhci_switch_external_dma(host, true);
 
+       /* R1B responses is required to properly manage HW busy detection. */
+       mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto err_put_sync;
index 5eea8d70a85d7ba9ecf2a9b3217581e59f552f08..ce15a05f23d41988dcbb36f2d08dc8cd6733cc4f 100644 (file)
@@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
        return 0;
 }
 
+static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
+{
+       int ret;
+
+       ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
+                                   PCI_IRQ_MSI | PCI_IRQ_MSIX);
+       if (ret < 0) {
+               pr_warn("%s: enable PCI MSI failed, error=%d\n",
+                      mmc_hostname(slot->host->mmc), ret);
+               return;
+       }
+
+       slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
+}
+
 static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
@@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
index 403ac44a737822cbd754021ad644164ea6e0dc79..a25c3a4d3f6cbb9dfd1d9d65607e31f102011fdb 100644 (file)
@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
        if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
                host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
+       /* R1B responses is required to properly manage HW busy detection. */
+       host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        tegra_sdhci_parse_dt(host);
 
        tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
index 1cc2cd894f877c0f6d9117ce008b7ef3fdfbcbcc..c81698550e5a78b091a761638ee35f887aacba1d 100644 (file)
@@ -50,11 +50,6 @@ struct arp_pkt {
 };
 #pragma pack()
 
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
-       return (struct arp_pkt *)skb_network_header(skb);
-}
-
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
                                      bool strict_match);
@@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
        spin_unlock(&bond->mode_lock);
 }
 
-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+static struct slave *rlb_choose_channel(struct sk_buff *skb,
+                                       struct bonding *bond,
+                                       const struct arp_pkt *arp)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *assigned_slave, *curr_active_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
@@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
  */
 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 {
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *tx_slave = NULL;
+       struct arp_pkt *arp;
+
+       if (!pskb_network_may_pull(skb, sizeof(*arp)))
+               return NULL;
+       arp = (struct arp_pkt *)skb_network_header(skb);
 
        /* Don't modify or load balance ARPs that do not originate locally
         * (e.g.,arrive via a bridge).
@@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 
        if (arp->op_code == htons(ARPOP_REPLY)) {
                /* the arp must be sent on the selected rx channel */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
                if (tx_slave)
                        bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
                                          tx_slave->dev->addr_len);
@@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 * When the arp reply is received the entry will be updated
                 * with the correct unicast address of the client.
                 */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
 
                /* The ARP reply packets must be delayed so that
                 * they can cancel out the influence of the ARP request.
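
The bond_alb change stops casting skb_network_header() to struct arp_pkt before it has been established that the ARP header is actually present in the linear data; pskb_network_may_pull() performs that check (pulling the bytes in if necessary) and only then is the pointer taken. A hedged kernel-style sketch of the parse-after-pull rule (handler name invented):

    #include <linux/if_arp.h>
    #include <linux/skbuff.h>

    static const struct arphdr *example_get_arp(struct sk_buff *skb)
    {
        /* make sure the header bytes are in the linear area before casting */
        if (!pskb_network_may_pull(skb, sizeof(struct arphdr)))
            return NULL;

        return (const struct arphdr *)skb_network_header(skb);
    }
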
index 6ee06a49fb4cdc4d8ffcb029918590539d192ee0..68834a2853c9d86a290a2f7cf2de86578f4ad44a 100644 (file)
@@ -883,6 +883,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
                                = { .len = sizeof(struct can_bittiming) },
        [IFLA_CAN_DATA_BITTIMING_CONST]
                                = { .len = sizeof(struct can_bittiming_const) },
+       [IFLA_CAN_TERMINATION]  = { .type = NLA_U16 },
 };
 
 static int can_validate(struct nlattr *tb[], struct nlattr *data[],
index 8c92895496881cb9d11adf4ee459bd549b247fe3..2f993e673ec7476261a900f76588970ff4532f49 100644 (file)
@@ -2769,6 +2769,8 @@ static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
                goto unlock;
        }
 
+       occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
 unlock:
        mv88e6xxx_reg_unlock(chip);
 
index 01503014b1ee761571497c5da84758b80a0cce8b..8fd483020c5b91685731cbac02cfe3b4328ead42 100644 (file)
@@ -1099,6 +1099,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
 {
        int err, irq, virq;
 
+       chip->g2_irq.masked = ~0;
+       mv88e6xxx_reg_lock(chip);
+       err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+       mv88e6xxx_reg_unlock(chip);
+       if (err)
+               return err;
+
        chip->g2_irq.domain = irq_domain_add_simple(
                chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
        if (!chip->g2_irq.domain)
@@ -1108,7 +1115,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
                irq_create_mapping(chip->g2_irq.domain, irq);
 
        chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
-       chip->g2_irq.masked = ~0;
 
        chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
                                            MV88E6XXX_G1_STS_IRQ_DEVICE);
index 03ba6d25f7fefb9b8b2d1dae8ea401ee23df13e5..7edea5741a5fdfb2baf2ba4b8aee098562f74bda 100644 (file)
@@ -1741,7 +1741,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
                if (!dsa_is_user_port(ds, port))
                        continue;
 
-               kthread_destroy_worker(sp->xmit_worker);
+               if (sp->xmit_worker)
+                       kthread_destroy_worker(sp->xmit_worker);
        }
 
        sja1105_tas_teardown(ds);
index e0611cba87f9586f23d85d8617c010f66e9230f6..15b31cddc054b754ce495569d560d232df84a073 100644 (file)
@@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
                return -ENOSPC;
 
        index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
-       if (index > RXCHK_BRCM_TAG_MAX)
+       if (index >= RXCHK_BRCM_TAG_MAX)
                return -ENOSPC;
 
        /* Location is the classification ID, and index is the position
index f9a8151f092c726dc553ec60459c6ba941e4a6e6..c5c8effc013970faab50b985215763b7d5fc451d 100644 (file)
@@ -10982,13 +10982,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
        struct bnxt *bp = netdev_priv(dev);
 
        if (netif_running(dev))
-               bnxt_close_nic(bp, false, false);
+               bnxt_close_nic(bp, true, false);
 
        dev->mtu = new_mtu;
        bnxt_set_ring_params(bp);
 
        if (netif_running(dev))
-               return bnxt_open_nic(bp, false, false);
+               return bnxt_open_nic(bp, true, false);
 
        return 0;
 }
index e8fc1671c5815e761f80f70a80cd911c115e80ae..1f67e6729a2c7ea58a448c778ad4944f926405f9 100644 (file)
@@ -2007,8 +2007,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_nvm_install_update_input install = {0};
        const struct firmware *fw;
-       int rc, hwrm_err = 0;
        u32 item_len;
+       int rc = 0;
        u16 index;
 
        bnxt_hwrm_fw_set_time(bp);
@@ -2052,15 +2052,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
                        memcpy(kmem, fw->data, fw->size);
                        modify.host_src_addr = cpu_to_le64(dma_handle);
 
-                       hwrm_err = hwrm_send_message(bp, &modify,
-                                                    sizeof(modify),
-                                                    FLASH_PACKAGE_TIMEOUT);
+                       rc = hwrm_send_message(bp, &modify, sizeof(modify),
+                                              FLASH_PACKAGE_TIMEOUT);
                        dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
                                          dma_handle);
                }
        }
        release_firmware(fw);
-       if (rc || hwrm_err)
+       if (rc)
                goto err_exit;
 
        if ((install_type & 0xffff) == 0)
@@ -2069,20 +2068,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        install.install_type = cpu_to_le32(install_type);
 
        mutex_lock(&bp->hwrm_cmd_lock);
-       hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
-                                     INSTALL_PACKAGE_TIMEOUT);
-       if (hwrm_err) {
+       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                               INSTALL_PACKAGE_TIMEOUT);
+       if (rc) {
                u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
 
                if (resp->error_code && error_code ==
                    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
                        install.flags |= cpu_to_le16(
                               NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
-                       hwrm_err = _hwrm_send_message(bp, &install,
-                                                     sizeof(install),
-                                                     INSTALL_PACKAGE_TIMEOUT);
+                       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                                               INSTALL_PACKAGE_TIMEOUT);
                }
-               if (hwrm_err)
+               if (rc)
                        goto flash_pkg_exit;
        }
 
@@ -2094,7 +2092,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
 flash_pkg_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 err_exit:
-       if (hwrm_err == -EACCES)
+       if (rc == -EACCES)
                bnxt_print_admin_err(bp);
        return rc;
 }
index 649842a8aa285e45eb929493fee2e40603339258..97f90edbc06831303813eb1a70db61e8c620fc34 100644 (file)
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+       u32 i, n10g = 0, qidx = 0, n1g = 0;
+       u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
-       u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
-       int q10g = 0;
-#endif
+       u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
        if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
        avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+       /* We default to 1 queue per non-10G port and up to # of cores queues
+        * per 10G port.
+        */
+       if (n10g)
+               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
         * own TX Queue in order to prevent Head-Of-Line Blocking.
         */
+       q1g = 8;
        if (adap->params.nports * 8 > avail_eth_qsets) {
                dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
                        avail_eth_qsets, adap->params.nports * 8);
                return -ENOMEM;
        }
 
-       for_each_port(adap, i) {
-               struct port_info *pi = adap2pinfo(adap, i);
+       if (adap->params.nports * ncpus < avail_eth_qsets)
+               q10g = max(8U, ncpus);
+       else
+               q10g = max(8U, q10g);
 
-               pi->first_qset = qidx;
-               pi->nqsets = is_kdump_kernel() ? 1 : 8;
-               qidx += pi->nqsets;
-       }
-#else /* !CONFIG_CHELSIO_T4_DCB */
-       /* We default to 1 queue per non-10G port and up to # of cores queues
-        * per 10G port.
-        */
-       if (n10g)
-               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
-       if (q10g > netif_get_num_default_rss_queues())
-               q10g = netif_get_num_default_rss_queues();
+       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+               q10g--;
 
-       if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+       q1g = 1;
+       q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+       if (is_kdump_kernel()) {
                q10g = 1;
+               q1g = 1;
+       }
 
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
 
                pi->first_qset = qidx;
-               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
                qidx += pi->nqsets;
        }
-#endif /* !CONFIG_CHELSIO_T4_DCB */
 
        s->ethqsets = qidx;
        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
                 * capped by the number of available cores.
                 */
                num_ulds = adap->num_uld + adap->num_ofld_uld;
-               i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+               i = min_t(u32, MAX_OFLD_QSETS, ncpus);
                avail_uld_qsets = roundup(i, adap->params.nports);
                if (avail_qsets < num_ulds * adap->params.nports) {
                        adap->params.offload = 0;
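
[Editor's note] The reworked cfg_queues() above splits the available Ethernet queue sets so that each 1G port gets q1g sets (8 under DCB, otherwise 1) and each 10G port gets q10g, capped on the non-DCB path by the number of online CPUs. A minimal, illustrative sketch of that arithmetic — names and plain C types are invented here, not the driver's helpers, and it assumes avail >= the number of 1G ports:

        /* Illustrative only: mirrors the non-DCB split introduced above.
         * avail = usable Ethernet qsets, nports = total ports,
         * n10g  = 10G-or-faster ports, ncpus = online CPUs.
         */
        static unsigned int qsets_per_10g_port(unsigned int avail, unsigned int nports,
                                               unsigned int n10g, unsigned int ncpus)
        {
                unsigned int n1g = nports - n10g;   /* 1G ports get one qset each */
                unsigned int q10g = 0;

                if (n10g)
                        q10g = (avail - n1g) / n10g;
                return q10g < ncpus ? q10g : ncpus; /* cap at the CPU count */
        }

For example, with 64 usable qsets, two 10G ports and two 1G ports on a 16-CPU machine, each 10G port would get min((64 - 2) / 2, 16) = 16 qsets.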
index fd93d542f497b8535cba6ac7c5859a3d12e7e867..ca74a684a9040e054b03784cac603077b33bf277 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define FSL_QMAN_MAX_OAL       127
 
 /* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT  (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+                                  DPAA_A050385_ALIGN : 16)
+#else
 #define DPAA_FD_DATA_ALIGNMENT  16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
 
 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
 #define DPAA_SGT_SIZE 256
@@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
 #define DPAA_TIME_STAMP_SIZE 8
 #define DPAA_HASH_RESULTS_SIZE 8
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+        + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#else
 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                        dpaa_rx_extra_headroom)
+#endif
 
 #define DPAA_ETH_PCD_RXQ_NUM   128
 
@@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
 #define DPAA_BP_RAW_SIZE 4096
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+                               ~(DPAA_A050385_ALIGN - 1))
+#else
 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
 
 static int dpaa_max_frm;
 
@@ -1192,7 +1218,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = true;
-       buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+       buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
 
        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
@@ -1662,6 +1688,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
        return CHECKSUM_NONE;
 }
 
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
 /* Build a linear skb around the received buffer.
  * We are guaranteed there is enough room at the end of the data buffer to
  * accommodate the shared info area of the skb.
@@ -1733,8 +1761,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 
                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
-               WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-                                   SMP_CACHE_BYTES));
+               WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
 
                dma_unmap_page(priv->rx_dma_dev, sg_addr,
                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
@@ -2022,6 +2049,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
        return 0;
 }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+       struct dpaa_priv *priv = netdev_priv(net_dev);
+       struct sk_buff *new_skb, *skb = *s;
+       unsigned char *start, i;
+
+       /* check linear buffer alignment */
+       if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+               goto workaround;
+
+       /* linear buffers just need to have an aligned start */
+       if (!skb_is_nonlinear(skb))
+               return 0;
+
+       /* linear data size for nonlinear skbs needs to be aligned */
+       if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+               goto workaround;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               /* all fragments need to have aligned start addresses */
+               if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+                       goto workaround;
+
+               /* all but last fragment need to have aligned sizes */
+               if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+                   (i < skb_shinfo(skb)->nr_frags - 1))
+                       goto workaround;
+       }
+
+       return 0;
+
+workaround:
+       /* copy all the skb content into a new linear buffer */
+       new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+                                               priv->tx_headroom);
+       if (!new_skb)
+               return -ENOMEM;
+
+       /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+       skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+       /* Workaround for DPAA_A050385 requires data start to be aligned */
+       start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+       if (start - new_skb->data != 0)
+               skb_reserve(new_skb, start - new_skb->data);
+
+       skb_put(new_skb, skb->len);
+       skb_copy_bits(skb, 0, new_skb->data, skb->len);
+       skb_copy_header(new_skb, skb);
+       new_skb->dev = skb->dev;
+
+       /* We move the headroom when we align it so we have to reset the
+        * network and transport header offsets relative to the new data
+        * pointer. The checksum offload relies on these offsets.
+        */
+       skb_set_network_header(new_skb, skb_network_offset(skb));
+       skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+       /* TODO: does timestamping need the result in the old skb? */
+       dev_kfree_skb(skb);
+       *s = new_skb;
+
+       return 0;
+}
+#endif
+
 static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
@@ -2068,6 +2164,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                nonlinear = skb_is_nonlinear(skb);
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       if (unlikely(fman_has_errata_a050385())) {
+               if (dpaa_a050385_wa(net_dev, &skb))
+                       goto enomem;
+               nonlinear = skb_is_nonlinear(skb);
+       }
+#endif
+
        if (nonlinear) {
                /* Just create a S/G fd based on the skb */
                err = skb_to_sg_fd(priv, skb, &fd);
@@ -2741,9 +2845,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
                DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
 
-       return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
-                                             DPAA_FD_DATA_ALIGNMENT) :
-                                       headroom;
+       return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
 }
 
 static int dpaa_eth_probe(struct platform_device *pdev)
index 4432a59904c77772f4ca1521fc8a4904485e87d2..23c5fef2f1ad1f274fe66db138e9cb662493f4c0 100644 (file)
@@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
        if (cycle > 0xFFFF) {
                dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
        if (cycle > 0xFFFF) {
-               dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+               dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
index 0139cb9042ec74cf8da79845b823f6cd96eea8eb..34150182cc35c06c65141b7104d910e6e4465ac7 100644 (file)
@@ -8,3 +8,31 @@ config FSL_FMAN
        help
                Freescale Data-Path Acceleration Architecture Frame Manager
                (FMan) support
+
+config DPAA_ERRATUM_A050385
+       bool
+       depends on ARM64 && FSL_DPAA
+       default y
+       help
+               DPAA FMan erratum A050385 software workaround implementation:
+               align buffers, data start, SG fragment length to avoid FMan DMA
+               splits.
+               FMAN DMA read or writes under heavy traffic load may cause FMAN
+               internal resource leak thus stopping further packet processing.
+               The FMAN internal queue can overflow when FMAN splits single
+               read or write transactions into multiple smaller transactions
+               such that more than 17 AXI transactions are in flight from FMAN
+               to interconnect. When the FMAN internal queue overflows, it can
+               stall further packet processing. The issue can occur with any
+               one of the following three conditions:
+               1. FMAN AXI transaction crosses 4K address boundary (Errata
+               A010022)
+               2. FMAN DMA address for an AXI transaction is not 16 byte
+               aligned, i.e. the last 4 bits of an address are non-zero
+               3. Scatter Gather (SG) frames have more than one SG buffer in
+               the SG list and any one of the buffers, except the last
+               buffer in the SG list has data size that is not a multiple
+               of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
+               With any one of the above three conditions present, there is
+               likelihood of stalled FMAN packet processing, especially under
+               stress with multiple ports injecting line-rate traffic.
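
[Editor's note] The three trigger conditions listed in the help text above are what the dpaa_a050385_wa() hunk earlier in this patch guards against: rather than checking 16-byte alignment directly, the workaround enforces the stricter 256-byte rule (DPAA_A050385_ALIGN), which also keeps transactions away from 4K page crossings. A condensed, illustrative restatement of the per-buffer rule — the function name and types below are invented for the sketch:

        /* Illustrative only: a buffer (or SG fragment) is acceptable when its
         * start is 256-byte aligned and, unless it is the last fragment, its
         * length is a multiple of 256; otherwise the skb is copied into a
         * freshly aligned linear bounce buffer.
         */
        #define A050385_ALIGN 256UL

        static int frag_needs_workaround(unsigned long start, unsigned long len,
                                         int is_last)
        {
                if (start & (A050385_ALIGN - 1))
                        return 1;                /* misaligned start address */
                if (!is_last && (len & (A050385_ALIGN - 1)))
                        return 1;                /* misaligned intermediate size */
                return 0;
        }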
index 934111def0becb5664e119b4d32bb672496c66e7..f151d6e111dd9a6dd24ab2456a3957846c449174 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -566,6 +567,10 @@ struct fman_cfg {
        u32 qmi_def_tnums_thresh;
 };
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
 static irqreturn_t fman_exceptions(struct fman *fman,
                                   enum fman_exceptions exception)
 {
@@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev)
 }
 EXPORT_SYMBOL(fman_bind);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+       return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
        struct fman *fman = (struct fman *)handle;
@@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                goto fman_free;
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       fman_has_err_a050385 =
+               of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
        return fman;
 
 fman_node_put:
index 935c317fa69642c9707fc583d1f34a12061b390e..f2ede1360f03a9cc1f312d84c87df854dc5f4847 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
 
 int fman_get_rx_extra_headroom(void);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
 struct fman *fman_bind(struct device *dev);
 
 #endif /* __FM_H */
index 1b0313900f98565b046a28610fad8cce16260ea6..d87158acdf6fd6563829ac239847544d14cc4c17 100644 (file)
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
        HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
        HCLGE_MBX_PUSH_PROMISC_INFO,    /* (PF -> VF) push vf promisc info */
+       HCLGE_MBX_VF_UNINIT,            /* (VF -> PF) vf is uninitializing */

 
        HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
        HCLGE_MBX_PUSH_LINK_STATUS,     /* (M7 -> PF) get port link status */
index acb796cc10d0bbe2ee701db1fabeba198a527dd6..a7f40aa1a0ea6d51b456cf4d81a6484b5a09c239 100644 (file)
@@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
        netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
 
        return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
-               kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+               kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
 }
 
 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
index 492bc944646372bea3db8212884e9d3914dd2a5e..d3b0cd74ecd231bd43e774a785c41363b8c6d963 100644 (file)
@@ -2446,10 +2446,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
 
 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 {
+       struct hclge_mac *mac = &hdev->hw.mac;
        int ret;
 
        duplex = hclge_check_speed_dup(duplex, speed);
-       if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+       if (!mac->support_autoneg && mac->speed == speed &&
+           mac->duplex == duplex)
                return 0;
 
        ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
@@ -7743,16 +7745,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
        struct hclge_desc desc;
        int ret;
 
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+       /* read current vlan filter parameter */
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
-       req->vlan_fe = filter_en ? fe_type : 0;
        req->vf_id = vf_id;
 
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to get vlan filter config, ret = %d.\n", ret);
+               return ret;
+       }
+
+       /* modify and write new config parameter */
+       hclge_cmd_reuse_desc(&desc, false);
+       req->vlan_fe = filter_en ?
+                       (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
-               dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+               dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
                        ret);
 
        return ret;
@@ -8270,6 +8283,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
                        kfree(vlan);
                }
        }
+       clear_bit(vport->vport_id, hdev->vf_vlan_full);
 }
 
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8486,6 +8500,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
        }
 }
 
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+       struct hclge_vlan_info *vlan_info;
+       struct hclge_vport *vport;
+       int ret;
+       int vf;
+
+       /* clear port base vlan for all vf */
+       for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+               vport = &hdev->vport[vf];
+               vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+               ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+                                              vport->vport_id,
+                                              vlan_info->vlan_tag, true);
+               if (ret)
+                       dev_err(&hdev->pdev->dev,
+                               "failed to clear vf vlan for vf%d, ret = %d\n",
+                               vf - HCLGE_VF_VPORT_START_NUM, ret);
+       }
+}
+
 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill)
 {
@@ -9895,6 +9931,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        struct hclge_mac *mac = &hdev->hw.mac;
 
        hclge_reset_vf_rate(hdev);
+       hclge_clear_vf_vlan(hdev);
        hclge_misc_affinity_teardown(hdev);
        hclge_state_uninit(hdev);
 
index a3c0822191a957fffb7d08b62432e3bbdf5e2ba3..3d850f6b1e373685dceb27dd9593b5fd488fb12f 100644 (file)
@@ -799,6 +799,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                        hclge_get_link_mode(vport, req);
                        break;
                case HCLGE_MBX_GET_VF_FLR_STATUS:
+               case HCLGE_MBX_VF_UNINIT:
                        hclge_rm_vport_all_mac_table(vport, true,
                                                     HCLGE_MAC_ADDR_UC);
                        hclge_rm_vport_all_mac_table(vport, true,
index d6597206e692e2737912e40de5f2685f2d665d5f..0510d85a7f6ae6d9bce3680a98a7b194b8417163 100644 (file)
@@ -2803,6 +2803,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 {
        hclgevf_state_uninit(hdev);
 
+       hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
+                            false, NULL, 0);
+
        if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
index c75239d8820f9519089d2caa74fcea6ab2c02cc1..4bd33245bad625d7bffb7a7002dc776effd3e956 100644 (file)
@@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
+       bool saved_state = false;
+       unsigned long flags;
        u32 reset_state;
        int rc = 0;
 
@@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work)
                return;
        }
 
-       reset_state = adapter->state;
-
        rwi = get_next_rwi(adapter);
        while (rwi) {
+               spin_lock_irqsave(&adapter->state_lock, flags);
+
                if (adapter->state == VNIC_REMOVING ||
                    adapter->state == VNIC_REMOVED) {
+                       spin_unlock_irqrestore(&adapter->state_lock, flags);
                        kfree(rwi);
                        rc = EBUSY;
                        break;
                }
 
+               if (!saved_state) {
+                       reset_state = adapter->state;
+                       adapter->state = VNIC_RESETTING;
+                       saved_state = true;
+               }
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+
                if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
                        /* CHANGE_PARAM requestor holds rtnl_lock */
                        rc = do_change_param_reset(adapter, rwi, reset_state);
@@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                          __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
@@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
 {
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->state_lock, flags);
+       if (adapter->state == VNIC_RESETTING) {
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+               return -EBUSY;
+       }
 
        adapter->state = VNIC_REMOVING;
+       spin_unlock_irqrestore(&adapter->state_lock, flags);
+
        rtnl_lock();
        unregister_netdevice(netdev);
 
index 60eccaf91b122e8946595ffb807fbe977e139961..f8416e1d4cf0942fd013faa6cefa85e552fb0849 100644 (file)
@@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1,
                 VNIC_CLOSING,
                 VNIC_CLOSED,
                 VNIC_REMOVING,
-                VNIC_REMOVED};
+                VNIC_REMOVED,
+                VNIC_RESETTING};
 
 enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
                           VNIC_RESET_MOBILITY,
@@ -1090,4 +1091,7 @@ struct ibmvnic_adapter {
 
        struct ibmvnic_tunables desired;
        struct ibmvnic_tunables fallback;
+
+       /* Used for serialization of state field */
+       spinlock_t state_lock;
 };
index 0b9e851f3da4fb1a37989e1aed311a5ad282353f..d2e2dc5384287c55447e32c5e547df65de0a6894 100644 (file)
@@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
        }
 
 
-       dev->err_interrupt = platform_get_irq(pdev, 0);
+       dev->err_interrupt = platform_get_irq_optional(pdev, 0);
        if (dev->err_interrupt > 0 &&
            resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
                dev_err(&pdev->dev,
@@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                writel(MVMDIO_ERR_INT_SMI_DONE,
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
-       } else if (dev->err_interrupt == -EPROBE_DEFER) {
-               ret = -EPROBE_DEFER;
+       } else if (dev->err_interrupt < 0) {
+               ret = dev->err_interrupt;
                goto out_mdio;
        }
 
index 86d543ab1ab9307c2ac790bd6368cedd67d0386c..d3b7373c59617b64bea82dd32a91ad4179c1c404 100644 (file)
@@ -2176,24 +2176,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
        return 0;
 }
 
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ */
+static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
        int atop_wm;
 
-       ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+       ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
 
        /* Set Pause WM hysteresis
-        * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
-        * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+        * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
+        * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
         */
        ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
                         SYS_PAUSE_CFG_PAUSE_STOP(101) |
                         SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
 
        /* Tail dropping watermark */
-       atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
-       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+       atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
+                  OCELOT_BUFFER_CELL_SZ;
+       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
                         SYS_ATOP, port);
        ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
 }
@@ -2222,9 +2227,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
                           DEV_MAC_HDX_CFG);
 
        /* Set Max Length and maximum tags allowed */
-       ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+       ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
        ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
                           DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+                          DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
                           DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
                           DEV_MAC_TAGS_CFG);
 
@@ -2310,18 +2316,18 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
         * Only one port can be an NPI at the same time.
         */
        if (cpu < ocelot->num_phys_ports) {
-               int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
+               int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
 
                ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
                             QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
                             QSYS_EXT_CPU_CFG);
 
                if (injection == OCELOT_TAG_PREFIX_SHORT)
-                       mtu += OCELOT_SHORT_PREFIX_LEN;
+                       sdu += OCELOT_SHORT_PREFIX_LEN;
                else if (injection == OCELOT_TAG_PREFIX_LONG)
-                       mtu += OCELOT_LONG_PREFIX_LEN;
+                       sdu += OCELOT_LONG_PREFIX_LEN;
 
-               ocelot_port_set_mtu(ocelot, cpu, mtu);
+               ocelot_port_set_maxlen(ocelot, cpu, sdu);
        }
 
        /* CPU port Injection/Extraction configuration */
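
[Editor's note] The rename from "mtu" to "maxlen"/"sdu" above makes the unit explicit: DEV_MAC_MAXLEN_CFG is programmed with a full L2 frame length, so the helper now adds the Ethernet header and FCS on top of the SDU it is handed. A worked illustration using the standard values ETH_HLEN = 14 and ETH_FCS_LEN = 4 (the names below are hypothetical, not the driver's):

        /* Illustrative only: frame length programmed for a given L2 payload. */
        enum { ETH_HDR_BYTES = 14, ETH_FCS_BYTES = 4 };

        static int maxlen_for_sdu(int sdu)
        {
                return sdu + ETH_HDR_BYTES + ETH_FCS_BYTES;
        }

        /* e.g. maxlen_for_sdu(1500) == 1518 for a standard ETH_DATA_LEN port;
         * the NPI/CPU port additionally folds in the injection tag prefix
         * length before calling the helper, as the hunk above shows.
         */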
index 191271f6260d2923d577d81fbc53b4d4242af0d5..c2f5b691e0fa09092dc82b6361f6b2ba64a8fc70 100644 (file)
@@ -1688,7 +1688,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
                return -EINVAL;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1698,7 +1698,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
                        ether_addr_copy(ionic->vfs[vf].macaddr, mac);
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
@@ -1719,7 +1719,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1730,7 +1730,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                        ionic->vfs[vf].vlanid = vlan;
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
index c705743d69f7ab5c806c2a8c9ccfbc4b8c0dbf92..2cc8184b7e6b5338864b4f5bf5db57b2d6b2c73f 100644 (file)
@@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str)
        if (!str || !*str)
                return -EINVAL;
        while ((opt = strsep(&str, ",")) != NULL) {
-               if (!strncmp(opt, "eee_timer:", 6)) {
+               if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                }
index 52113b7529d6fecd3bcbb5332a98a39a358befb4..3f16bd807c6ef65b71917a9a5af5b7235db18b9b 100644 (file)
@@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
        }
 
        /* Transmit timestamps are only available for 8XXX series. They result
-        * in three events per packet. These occur in order, and are:
-        *  - the normal completion event
+        * in up to three events per packet. These occur in order, and are:
+        *  - the normal completion event (may be omitted)
         *  - the low part of the timestamp
         *  - the high part of the timestamp
         *
+        * It's possible for multiple completion events to appear before the
+        * corresponding timestamps. So we can for example get:
+        *  COMP N
+        *  COMP N+1
+        *  TS_LO N
+        *  TS_HI N
+        *  TS_LO N+1
+        *  TS_HI N+1
+        *
+        * In addition it's also possible for the adjacent completions to be
+        * merged, so we may not see COMP N above. As such, the completion
+        * events are not very useful here.
+        *
         * Each part of the timestamp is itself split across two 16 bit
         * fields in the event.
         */
@@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 
        switch (tx_ev_type) {
        case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
-               /* In case of Queue flush or FLR, we might have received
-                * the previous TX completion event but not the Timestamp
-                * events.
-                */
-               if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
-                       efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
-               tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
-                                                ESF_DZ_TX_DESCR_INDX);
-               tx_queue->completed_desc_ptr =
-                                       tx_ev_desc_ptr & tx_queue->ptr_mask;
+               /* Ignore this event - see above. */
                break;
 
        case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
@@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
                ts_part = efx_ef10_extract_event_ts(event);
                tx_queue->completed_timestamp_major = ts_part;
 
-               efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-               tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+               efx_xmit_done_single(tx_queue);
                break;
 
        default:
index f1bdb04efbe4d326ebc12f7f1fd544467249825f..95395d67ea2d662878d0dc8cac63b90424db1200 100644 (file)
@@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
 int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data);
 extern unsigned int efx_piobuf_size;
index aeb5e8aa2f2a2ba76c679ffa41dc929aff2c0bd1..73d4e39b5b1662e231e32ecc6aaae8a39e55a158 100644 (file)
@@ -583,6 +583,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
                if (tx_queue->channel)
                        tx_queue->channel = channel;
                tx_queue->buffer = NULL;
+               tx_queue->cb_page = NULL;
                memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
        }
 
index 9f9886f222c864d9dfb06bf6c39ce2900c064648..8164f0edcbf0aa13f57cc24420fb8965374d5589 100644 (file)
@@ -208,8 +208,6 @@ struct efx_tx_buffer {
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
  * @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- *      timestamping.
  * @completed_timestamp_major: Top part of the most recent tx timestamp.
  * @completed_timestamp_minor: Low part of the most recent tx timestamp.
  * @insert_count: Current insert pointer
@@ -269,7 +267,6 @@ struct efx_tx_queue {
        unsigned int merge_events;
        unsigned int bytes_compl;
        unsigned int pkts_compl;
-       unsigned int completed_desc_ptr;
        u32 completed_timestamp_major;
        u32 completed_timestamp_minor;
 
index 04d7f41d7ed90c4107a0093718523d57b2a17aa5..8aafc54a4684469d645e9304733e6f8f49c6a5b3 100644 (file)
@@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
        return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       unsigned int read_ptr;
+       bool finished = false;
+
+       read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+       while (!finished) {
+               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+               if (!efx_tx_buffer_in_use(buffer)) {
+                       struct efx_nic *efx = tx_queue->efx;
+
+                       netif_err(efx, hw, efx->net_dev,
+                                 "TX queue %d spurious single TX completion\n",
+                                 tx_queue->queue);
+                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                       return;
+               }
+
+               /* Need to check the flag before dequeueing. */
+               if (buffer->flags & EFX_TX_BUF_SKB)
+                       finished = true;
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+               ++tx_queue->read_count;
+               read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+       }
+
+       tx_queue->pkts_compl += pkts_compl;
+       tx_queue->bytes_compl += bytes_compl;
+
+       EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+       efx_xmit_done_check_empty(tx_queue);
+}
+
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
index b1571e9789d02b94f93902dfeff24386fbabc42f..70876df1da69b92b741975ad3095ef4432ef158b 100644 (file)
@@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->xmit_more_available = false;
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
-       tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;
 
@@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
 
-               if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
-                   unlikely(buffer->len == 0)) {
+               if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
-                                 "TX queue %d spurious TX completion id %x\n",
+                                 "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
@@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        }
 }
 
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               if (tx_queue->read_count == tx_queue->old_write_count) {
+                       /* Ensure that read_count is flushed. */
+                       smp_mb();
+                       tx_queue->empty_read_count =
+                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+               }
+       }
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
@@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
-       /* Check whether the hardware queue is now empty */
-       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
-               if (tx_queue->read_count == tx_queue->old_write_count) {
-                       smp_mb();
-                       tx_queue->empty_read_count =
-                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
-               }
-       }
+       efx_xmit_done_check_empty(tx_queue);
 }
 
 /* Remove buffers put into a tx_queue for the current packet.
index f92f1fe3a87ff27baed559478a628fadf2c8818c..99cf7ce2f36c9a169f1239c1e9d1ac17334aaa03 100644 (file)
@@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl);
 
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+       return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 
 void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
index d0356fbd1e4309056919926a9aa677dc9586af2a..542784300620ff41407c00fe7c6e948fc15cba32 100644 (file)
@@ -24,6 +24,7 @@
 static void dwmac1000_core_init(struct mac_device_info *hw,
                                struct net_device *dev)
 {
+       struct stmmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
        int mtu = dev->mtu;
@@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
         * Broadcom tags can look like invalid LLC/SNAP packets and cause the
         * hardware to truncate packets on reception.
         */
-       if (netdev_uses_dsa(dev))
+       if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
                value &= ~GMAC_CONTROL_ACS;
 
        if (mtu > 1500)
index 30cd0c4f0be0b4d1dea2c0a4d68d0e33d1931ebc..8801d093135c3e72ca22643a8fbc7bf896727e4b 100644 (file)
@@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
                }
                if (dev)
                        dev_put(dev);
+               cond_resched();
        }
 }
 
@@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
        struct ethhdr *ethh = eth_hdr(skb);
        int ret = NET_XMIT_DROP;
 
-       /* In this mode we dont care about multicast and broadcast traffic */
-       if (is_multicast_ether_addr(ethh->h_dest)) {
-               pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
-                                    ntohs(skb->protocol));
-               kfree_skb(skb);
-               goto out;
-       }
-
        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * will have L2; which need to discarded and processed further
         * in the net-ns of the main-device.
         */
        if (skb_mac_header_was_set(skb)) {
+               /* In this mode we dont care about
+                * multicast and broadcast traffic */
+               if (is_multicast_ether_addr(ethh->h_dest)) {
+                       pr_debug_ratelimited(
+                               "Dropped {multi|broad}cast of type=[%x]\n",
+                               ntohs(skb->protocol));
+                       kfree_skb(skb);
+                       goto out;
+               }
+
                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
index a70662261a5a28d7947c33461d2f657037258adf..f195f278a83aa296c4c1285794b19b8fbc4ac497 100644 (file)
@@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
 static int ipvlan_open(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;
 
        if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
                ipvlan_ht_addr_add(ipvlan, addr);
        rcu_read_unlock();
 
-       return dev_uc_add(phy_dev, phy_dev->dev_addr);
+       return 0;
 }
 
 static int ipvlan_stop(struct net_device *dev)
@@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);
 
-       dev_uc_del(phy_dev, phy_dev->dev_addr);
-
        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);
index 45bfd99f17fa9f6adae6a2e25fc821b23c8c8df4..6ec6fc191a6e469d1149d9e7c79009e5ae55be1b 100644 (file)
@@ -424,6 +424,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
        return (struct macsec_eth_header *)skb_mac_header(skb);
 }
 
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+       return make_sci(dev->dev_addr, port);
+}
+
 static void __macsec_pn_wrapped(struct macsec_secy *secy,
                                struct macsec_tx_sa *tx_sa)
 {
@@ -3268,6 +3273,20 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
 
 out:
        ether_addr_copy(dev->dev_addr, addr->sa_data);
+       macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+       /* If h/w offloading is available, propagate to the device */
+       if (macsec_is_offloaded(macsec)) {
+               const struct macsec_ops *ops;
+               struct macsec_context ctx;
+
+               ops = macsec_get_ops(macsec, &ctx);
+               if (ops) {
+                       ctx.secy = &macsec->secy;
+                       macsec_offload(ops->mdo_upd_secy, &ctx);
+               }
+       }
+
        return 0;
 }
 
@@ -3342,6 +3361,7 @@ static const struct device_type macsec_type = {
 
 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
        [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+       [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
        [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
        [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
        [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@@ -3592,11 +3612,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
        return false;
 }
 
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
-       return make_sci(dev->dev_addr, port);
-}
-
 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
index 81aa7adf480123cc15e1f8b2722128d6b2fa6236..e7289d67268fc72caf8fe186a8065454270b2190 100644 (file)
@@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
                if (src)
                        dev_put(src->dev);
                consume_skb(skb);
+
+               cond_resched();
        }
 }
 
index 23f1958ba6ad4f6000aa0c5de4d22dca908ef248..459fb2069c7e0121d4d5c15856365b15143c834c 100644 (file)
@@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
        .phy_id_mask    = 0xfffffc00,
+       .name           = "Broadcom BCM63XX (2)",
        /* PHY_BASIC_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
index d76e038cf2cb5f7972cd0e375f7fab8d93bc13fe..355bfdef48d2b1f6c276e51bab37d3ee995a213e 100644 (file)
@@ -727,7 +727,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
                phy_trigger_machine(phydev);
        }
 
-       if (phy_clear_interrupt(phydev))
+       /* did_interrupt() may have cleared the interrupt already */
+       if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
                goto phy_err;
        return IRQ_HANDLED;
 
index c8b0c34030d32cdf7cac3acfdc50903da5eecf0f..28e3c5c0e3c30ca7063bd1e2f8f5b4c9414c4040 100644 (file)
@@ -286,6 +286,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                return 0;
 
+       phydev->suspended_by_mdio_bus = 1;
+
        return phy_suspend(phydev);
 }
 
@@ -294,9 +296,11 @@ static int mdio_bus_phy_resume(struct device *dev)
        struct phy_device *phydev = to_phy_device(dev);
        int ret;
 
-       if (!mdio_bus_phy_may_suspend(phydev))
+       if (!phydev->suspended_by_mdio_bus)
                goto no_resume;
 
+       phydev->suspended_by_mdio_bus = 0;
+
        ret = phy_resume(phydev);
        if (ret < 0)
                return ret;
index 70b9a143db84afb8e7cc114942354f961084b696..6e66b8e77ec7b5c017c7e0a8cf654153441c3633 100644 (file)
@@ -761,8 +761,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
                config.interface = interface;
 
        ret = phylink_validate(pl, supported, &config);
-       if (ret)
+       if (ret) {
+               phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+                            phy_modes(config.interface),
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
+                            ret);
                return ret;
+       }
 
        phy->phylink = pl;
        phy->phy_link_change = phylink_phy_change;
index 58a69f830d29bd507e5108e893d92bb8608ad8a3..f78ceba42e57e4564544b26e6701c6f0901cb3fa 100644 (file)
@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
        struct cstate *cs = lcs->next;
        unsigned long deltaS, deltaA;
        short changes = 0;
-       int hlen;
+       int nlen, hlen;
        unsigned char new_seq[16];
        unsigned char *cp = new_seq;
        struct iphdr *ip;
@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                return isize;
 
        ip = (struct iphdr *) icp;
+       if (ip->version != 4 || ip->ihl < 5)
+               return isize;
 
        /* Bail if this packet isn't TCP, or is an IP fragment */
        if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                        comp->sls_o_tcp++;
                return isize;
        }
-       /* Extract TCP header */
+       nlen = ip->ihl * 4;
+       if (isize < nlen + sizeof(*th))
+               return isize;
 
-       th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
-       hlen = ip->ihl*4 + th->doff*4;
+       th = (struct tcphdr *)(icp + nlen);
+       if (th->doff < sizeof(struct tcphdr) / 4)
+               return isize;
+       hlen = nlen + th->doff * 4;
 
        /*  Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
         *  some other control bit is set). Also uncompressible if
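
[Editor's note] The hunks above harden slhc_compress() against malformed input before it dereferences the TCP header: the packet must really be IPv4 with a sane IHL, be long enough to hold a TCP header at that offset, and carry a TCP data offset of at least five 32-bit words. A standalone, illustrative restatement of those checks — the struct and names are invented for the sketch, standing in for the kernel's struct iphdr/tcphdr:

        /* Illustrative only: returns 1 when the headers look sane enough to
         * attempt VJ compression, mirroring the bounds checks added above.
         */
        struct pkt_view {
                unsigned char ip_version;   /* iphdr.version */
                unsigned char ip_ihl;       /* iphdr.ihl, in 32-bit words */
                unsigned char tcp_doff;     /* tcphdr.doff, in 32-bit words */
                unsigned int  total_len;    /* bytes available in the buffer */
        };

        static int headers_are_sane(const struct pkt_view *p)
        {
                unsigned int nlen;

                if (p->ip_version != 4 || p->ip_ihl < 5)
                        return 0;
                nlen = p->ip_ihl * 4;
                if (p->total_len < nlen + 20)   /* 20 == sizeof(struct tcphdr) */
                        return 0;
                if (p->tcp_doff < 5)            /* minimal TCP header: 5 words */
                        return 0;
                return 1;
        }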
index ca70a1d840eb38522a20a47b4404a325e7aeb956..4004f98e50d9fc58a8d8d6449448840d3c9da748 100644 (file)
@@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
+       [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
+       [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
index 78ddbaf6401b6c953971e4dd6848688483fba89f..95b19ce96513c53b1dc1f2c89fc9d255f4a334bc 100644 (file)
@@ -3221,6 +3221,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
                }
 
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        return data;
@@ -5402,7 +5404,10 @@ static void r8153_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
@@ -5539,7 +5544,10 @@ static void r8153b_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
index 8cdc4415fa706913e7fb66ef73abd472fa1edda8..d4cbb9e8c63f62f8dcb40806f959a5ad0b9e7d7d 100644 (file)
@@ -328,7 +328,7 @@ static void veth_get_stats64(struct net_device *dev,
        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
-               tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+               veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;
 
index 70b29bf16bb979b1789e4f2e19e9b62798a9fea4..60296a754af26a2ad9acc5ba1852b5f3438074d4 100644 (file)
@@ -308,7 +308,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                }
 
                /* PHY_SKU section is mandatory in B0 */
-               if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+               if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
                        IWL_ERR(mvm,
                                "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
index 6173c80189ba3e10c31a6b5f5f4575d7e06933e5..1847f55e199b0971969a0b1610be6620c5981150 100644 (file)
@@ -447,10 +447,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
        struct page *page = virt_to_head_page(data);
        int offset = data - page_address(page);
        struct sk_buff *skb = q->rx_head;
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-       offset += q->buf_offset;
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
-                       q->buf_size);
+       if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
+               offset += q->buf_offset;
+               skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
+                               q->buf_size);
+       }
 
        if (more)
                return;
index 8270bbf505fbe4d873b7a67d17ffc07daeac3da6..9f982c0627a0d1bab11f11c775e778f58a7902bc 100644 (file)
@@ -306,6 +306,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                rc = of_mdiobus_register_phy(mdio, child, addr);
                                if (rc && rc != -ENODEV)
                                        goto unregister;
+                               break;
                        }
                }
        }
index 7b6409ef553c949ad25c0b4e63f496073a833b1b..dce2626384a903aeab78cc01d5ddd762097194ab 100644 (file)
@@ -1073,13 +1073,26 @@ static int madera_pin_probe(struct platform_device *pdev)
                return ret;
        }
 
+       platform_set_drvdata(pdev, priv);
+
        dev_dbg(priv->dev, "pinctrl probed ok\n");
 
        return 0;
 }
 
+static int madera_pin_remove(struct platform_device *pdev)
+{
+       struct madera_pin_private *priv = platform_get_drvdata(pdev);
+
+       if (priv->madera->pdata.gpio_configs)
+               pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
+
+       return 0;
+}
+
 static struct platform_driver madera_pin_driver = {
        .probe = madera_pin_probe,
+       .remove = madera_pin_remove,
        .driver = {
                .name = "madera-pinctrl",
        },
index 446d84fe0e31c7ec471bfc7fa66a3e782996ac04..f23c55e221955ed0a7721019aedc2c31c27ec4d5 100644 (file)
@@ -2021,7 +2021,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
                return PTR_ERR(pctldev->p);
        }
 
-       kref_get(&pctldev->p->users);
        pctldev->hog_default =
                pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(pctldev->hog_default)) {
index 73bf1d9f9cc6f098d572a4769bd689258c4b24b0..23cf04bdfc55d4551852463eabc6a8c81550f080 100644 (file)
@@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set {
        struct imx_sc_rpc_msg hdr;
        u32 val;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_req_pad_get {
        struct imx_sc_rpc_msg hdr;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_resp_pad_get {
        struct imx_sc_rpc_msg hdr;
index 1b6e8646700f9b31510a82ddff5e41e0438dde77..2ac921c83da9140fa2907bc89db297cd0ef5071d 100644 (file)
@@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[]    = { GPIOX_0 };
 static const unsigned int sdio_d1_pins[]       = { GPIOX_1 };
 static const unsigned int sdio_d2_pins[]       = { GPIOX_2 };
 static const unsigned int sdio_d3_pins[]       = { GPIOX_3 };
-static const unsigned int sdio_cmd_pins[]      = { GPIOX_4 };
-static const unsigned int sdio_clk_pins[]      = { GPIOX_5 };
+static const unsigned int sdio_clk_pins[]      = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[]      = { GPIOX_5 };
 static const unsigned int sdio_irq_pins[]      = { GPIOX_7 };
 
 static const unsigned int nand_ce0_pins[]      = { BOOT_8 };
index a454f57c264eede94a37a7d51573fc3c717c2982..62c02b969327f8f0b1e9f7da2e25adf16f4aaa7c 100644 (file)
@@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
                falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
                if (IS_ERR(falcon_info.clk[*bank])) {
                        dev_err(&ppdev->dev, "failed to get clock\n");
-                       of_node_put(np)
+                       of_node_put(np);
                        return PTR_ERR(falcon_info.clk[*bank]);
                }
                falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
index 9a8daa256a32136e5ce044e594074d351b59bc54..1a948c3f54b7ca4ad6fd246d097d34f7a9115583 100644 (file)
@@ -1104,7 +1104,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
        pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
        pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
-       pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
        pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
        pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
        pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
@@ -1118,7 +1117,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
                if (!chip->irq.parent_domain)
                        return -EPROBE_DEFER;
                chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
-
+               pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
                /*
                 * Let's skip handling the GPIOs, if the parent irqchip
                 * is handling the direct connect IRQ of the GPIO.
index fba1d41d20ece4a245872e5ed99a0ebd2612df66..338a15d08629440d8d6290a6be0f0a2b42daf63f 100644 (file)
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
        girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
        girq->parent_domain = parent_domain;
        girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
-       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;
        girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
        girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
 
index 6cca72782af6a004c02734f2d7d9b21c34770167..cf87eb27879f087cdc125e2c248916f0909a41c1 100644 (file)
@@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void)
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
+       INIT_LIST_HEAD(&block->format_list);
+       spin_lock_init(&block->format_lock);
        timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);
 
@@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 
        if (dasd_ese_needs_format(cqr->block, irb)) {
                if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
-                       device->discipline->ese_read(cqr);
+                       device->discipline->ese_read(cqr, irb);
                        cqr->status = DASD_CQR_SUCCESS;
                        cqr->stopclk = now;
                        dasd_device_clear_timer(device);
                        dasd_schedule_device_bh(device);
                        return;
                }
-               fcqr = device->discipline->ese_format(device, cqr);
+               fcqr = device->discipline->ese_format(device, cqr, irb);
                if (IS_ERR(fcqr)) {
+                       if (PTR_ERR(fcqr) == -EINVAL) {
+                               cqr->status = DASD_CQR_ERROR;
+                               return;
+                       }
                        /*
                         * If we can't format now, let the request go
                         * one extra round. Maybe we can format later.
                         */
                        cqr->status = DASD_CQR_QUEUED;
+                       dasd_schedule_device_bh(device);
+                       return;
                } else {
                        fcqr->status = DASD_CQR_QUEUED;
                        cqr->status = DASD_CQR_QUEUED;
@@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
        struct request *req;
        blk_status_t error = BLK_STS_OK;
+       unsigned int proc_bytes;
        int status;
 
        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
 
+       proc_bytes = cqr->proc_bytes;
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
                error = errno_to_blk_status(status);
@@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
                blk_mq_end_request(req, error);
                blk_mq_run_hw_queues(req->q, true);
        } else {
-               blk_mq_complete_request(req);
+               /*
+                * Partial completed requests can happen with ESE devices.
+                * During read we might have gotten a NRF error and have to
+                * complete a request partially.
+                */
+               if (proc_bytes) {
+                       blk_update_request(req, BLK_STS_OK,
+                                          blk_rq_bytes(req) - proc_bytes);
+                       blk_mq_requeue_request(req, true);
+               } else {
+                       blk_mq_complete_request(req);
+               }
        }
 }
 
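The partial-completion path added above follows a general blk-mq pattern: report the bytes that did complete, then requeue the remainder for another dispatch. A minimal sketch of that pattern, independent of the DASD-specific byte accounting and using a hypothetical helper name, could look like this:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * Illustrative helper (not part of the patch): complete 'done' bytes of a
 * request and requeue whatever is left; fall back to a normal completion
 * when the whole request finished.
 */
static void complete_or_requeue(struct request *req, unsigned int done)
{
	if (done && done < blk_rq_bytes(req)) {
		/* report the completed prefix to the block layer */
		blk_update_request(req, BLK_STS_OK, done);
		/* kick the remainder back for another dispatch round */
		blk_mq_requeue_request(req, true);
	} else {
		blk_mq_complete_request(req);
	}
}

The driver's own accounting in __dasd_cleanup_cqr() differs in detail (it derives the remainder from cqr->proc_bytes), but the split between blk_update_request() and blk_mq_requeue_request() is the same idea.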
index a28b9ff823780de364ba7e79d871a92c464bce49..ad44d22e88591232ef59f5e4e9ec98564ef34a9d 100644 (file)
@@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
        geo->head |= head;
 }
 
+/*
+ * calculate failing track from sense data depending if
+ * it is an EAV device or not
+ */
+static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
+                                   sector_t *track)
+{
+       struct dasd_eckd_private *private = device->private;
+       u8 *sense = NULL;
+       u32 cyl;
+       u8 head;
+
+       sense = dasd_get_sense(irb);
+       if (!sense) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no sense data\n");
+               return -EINVAL;
+       }
+       if (!(sense[27] & DASD_SENSE_BIT_2)) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no valid track data\n");
+               return -EINVAL;
+       }
+
+       if (sense[27] & DASD_SENSE_BIT_3) {
+               /* enhanced addressing */
+               cyl = sense[30] << 20;
+               cyl |= (sense[31] & 0xF0) << 12;
+               cyl |= sense[28] << 8;
+               cyl |= sense[29];
+       } else {
+               cyl = sense[29] << 8;
+               cyl |= sense[30];
+       }
+       head = sense[31] & 0x0F;
+       *track = cyl * private->rdc_data.trk_per_cyl + head;
+       return 0;
+}
+
 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
                     struct dasd_device *device)
 {
@@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base,
                                             0, NULL);
 }
 
+static bool test_and_set_format_track(struct dasd_format_entry *to_format,
+                                     struct dasd_block *block)
+{
+       struct dasd_format_entry *format;
+       unsigned long flags;
+       bool rc = false;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_for_each_entry(format, &block->format_list, list) {
+               if (format->track == to_format->track) {
+                       rc = true;
+                       goto out;
+               }
+       }
+       list_add_tail(&to_format->list, &block->format_list);
+
+out:
+       spin_unlock_irqrestore(&block->format_lock, flags);
+       return rc;
+}
+
+static void clear_format_track(struct dasd_format_entry *format,
+                             struct dasd_block *block)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_del_init(&format->list);
+       spin_unlock_irqrestore(&block->format_lock, flags);
+}
+
 /*
  * Callback function to free ESE format requests.
  */
@@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
 {
        struct dasd_device *device = cqr->startdev;
        struct dasd_eckd_private *private = device->private;
+       struct dasd_format_entry *format = data;
 
+       clear_format_track(format, cqr->basedev->block);
        private->count--;
        dasd_ffree_request(cqr, device);
 }
 
 static struct dasd_ccw_req *
-dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
+dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
+                    struct irb *irb)
 {
        struct dasd_eckd_private *private;
+       struct dasd_format_entry *format;
        struct format_data_t fdata;
        unsigned int recs_per_trk;
        struct dasd_ccw_req *fcqr;
@@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
        struct request *req;
        sector_t first_trk;
        sector_t last_trk;
+       sector_t curr_trk;
        int rc;
 
        req = cqr->callback_data;
-       base = cqr->block->base;
+       block = cqr->block;
+       base = block->base;
        private = base->private;
-       block = base->block;
        blksize = block->bp_block;
        recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       format = &startdev->format_entry;
 
        first_trk = blk_rq_pos(req) >> block->s2b_shift;
        sector_div(first_trk, recs_per_trk);
        last_trk =
                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return ERR_PTR(rc);
 
-       fdata.start_unit = first_trk;
-       fdata.stop_unit = last_trk;
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, startdev,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return ERR_PTR(-EINVAL);
+       }
+       format->track = curr_trk;
+       /* test if track is already in formatting by another thread */
+       if (test_and_set_format_track(format, block))
+               return ERR_PTR(-EEXIST);
+
+       fdata.start_unit = curr_trk;
+       fdata.stop_unit = curr_trk;
        fdata.blksize = blksize;
        fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
 
@@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
                return fcqr;
 
        fcqr->callback = dasd_eckd_ese_format_cb;
+       fcqr->callback_data = (void *) format;
 
        return fcqr;
 }
@@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
 /*
  * When data is read from an unformatted area of an ESE volume, this function
  * returns zeroed data and thereby mimics a read of zero data.
+ *
+ * The first unformatted track is the one that got the NRF error, the address is
+ * encoded in the sense data.
+ *
+ * All tracks before have returned valid data and should not be touched.
+ * All tracks after the unformatted track might be formatted or not. This is
+ * currently not known, remember the processed data and return the remainder of
+ * the request to the blocklayer in __dasd_cleanup_cqr().
  */
-static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
+static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
 {
+       struct dasd_eckd_private *private;
+       sector_t first_trk, last_trk;
+       sector_t first_blk, last_blk;
        unsigned int blksize, off;
+       unsigned int recs_per_trk;
        struct dasd_device *base;
        struct req_iterator iter;
+       struct dasd_block *block;
+       unsigned int skip_block;
+       unsigned int blk_count;
        struct request *req;
        struct bio_vec bv;
+       sector_t curr_trk;
+       sector_t end_blk;
        char *dst;
+       int rc;
 
        req = (struct request *) cqr->callback_data;
        base = cqr->block->base;
        blksize = base->block->bp_block;
+       block =  cqr->block;
+       private = base->private;
+       skip_block = 0;
+       blk_count = 0;
+
+       recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
+       sector_div(first_trk, recs_per_trk);
+       last_trk = last_blk =
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+       sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return rc;
+
+       /* sanity check if the current track from sense data is valid */
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, base,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return -EINVAL;
+       }
+
+       /*
+        * if not the first track got the NRF error we have to skip over valid
+        * blocks
+        */
+       if (curr_trk != first_trk)
+               skip_block = curr_trk * recs_per_trk - first_blk;
+
+       /* we have no information beyond the current track */
+       end_blk = (curr_trk + 1) * recs_per_trk;
 
        rq_for_each_segment(bv, req, iter) {
                dst = page_address(bv.bv_page) + bv.bv_offset;
                for (off = 0; off < bv.bv_len; off += blksize) {
-                       if (dst && rq_data_dir(req) == READ) {
+                       if (first_blk + blk_count >= end_blk) {
+                               cqr->proc_bytes = blk_count * blksize;
+                               return 0;
+                       }
+                       if (dst && !skip_block) {
                                dst += off;
                                memset(dst, 0, blksize);
+                       } else {
+                               skip_block--;
                        }
+                       blk_count++;
                }
        }
+       return 0;
 }
 
 /*
index 91c9f9586e0f645f87fe9899fb7f3e437ee85bb8..fa552f9f1666713313ccb644e425d2b41f229942 100644 (file)
@@ -187,6 +187,7 @@ struct dasd_ccw_req {
 
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
+       unsigned int proc_bytes;        /* bytes for partial completion */
 };
 
 /*
@@ -387,8 +388,9 @@ struct dasd_discipline {
        int (*ext_pool_warn_thrshld)(struct dasd_device *);
        int (*ext_pool_oos)(struct dasd_device *);
        int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
-       struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
-       void (*ese_read)(struct dasd_ccw_req *);
+       struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
+                                          struct dasd_ccw_req *, struct irb *);
+       int (*ese_read)(struct dasd_ccw_req *, struct irb *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -474,6 +476,11 @@ struct dasd_profile {
        spinlock_t lock;
 };
 
+struct dasd_format_entry {
+       struct list_head list;
+       sector_t track;
+};
+
 struct dasd_device {
        /* Block device stuff. */
        struct dasd_block *block;
@@ -539,6 +546,7 @@ struct dasd_device {
        struct dentry *debugfs_dentry;
        struct dentry *hosts_dentry;
        struct dasd_profile profile;
+       struct dasd_format_entry format_entry;
 };
 
 struct dasd_block {
@@ -564,6 +572,9 @@ struct dasd_block {
 
        struct dentry *debugfs_dentry;
        struct dasd_profile profile;
+
+       struct list_head format_list;
+       spinlock_t format_lock;
 };
 
 struct dasd_attention_data {
index 9575a627a1e18d446cf79dd7cb4011dfa2f2accf..468cada49e72c012544dbbbbb47ad8c7585a6213 100644 (file)
@@ -369,7 +369,7 @@ enum qeth_qdio_info_states {
 struct qeth_buffer_pool_entry {
        struct list_head list;
        struct list_head init_list;
-       void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+       struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 };
 
 struct qeth_qdio_buffer_pool {
@@ -983,7 +983,7 @@ extern const struct attribute_group qeth_device_blkt_group;
 extern const struct device_type qeth_generic_devtype;
 
 const char *qeth_get_cardname_short(struct qeth_card *);
-int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
 
index 8ca85c8a01a15cd000fd1b8b2970ce28207256c3..6d3f2f14b4143fdf5584001786ea82c0a62a2cca 100644 (file)
@@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key;
 static void qeth_issue_next_read_cb(struct qeth_card *card,
                                    struct qeth_cmd_buffer *iob,
                                    unsigned int data_length);
-static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
@@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
 
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+               if (entry->elements[i])
+                       __free_page(entry->elements[i]);
+       }
+
+       kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+       struct qeth_buffer_pool_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+                                init_list) {
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+       }
+}
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+       struct qeth_buffer_pool_entry *entry;
+       unsigned int i;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+
+       for (i = 0; i < pages; i++) {
+               entry->elements[i] = alloc_page(GFP_KERNEL);
+
+               if (!entry->elements[i]) {
+                       qeth_free_pool_entry(entry);
+                       return NULL;
+               }
+       }
+
+       return entry;
+}
+
 static int qeth_alloc_buffer_pool(struct qeth_card *card)
 {
-       struct qeth_buffer_pool_entry *pool_entry;
-       void *ptr;
-       int i, j;
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       unsigned int i;
 
        QETH_CARD_TEXT(card, 5, "alocpool");
        for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-               pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
-               if (!pool_entry) {
+               struct qeth_buffer_pool_entry *entry;
+
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
                        qeth_free_buffer_pool(card);
                        return -ENOMEM;
                }
-               for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
-                       ptr = (void *) __get_free_page(GFP_KERNEL);
-                       if (!ptr) {
-                               while (j > 0)
-                                       free_page((unsigned long)
-                                                 pool_entry->elements[--j]);
-                               kfree(pool_entry);
-                               qeth_free_buffer_pool(card);
-                               return -ENOMEM;
-                       }
-                       pool_entry->elements[j] = ptr;
-               }
-               list_add(&pool_entry->init_list,
-                        &card->qdio.init_pool.entry_list);
+
+               list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
        }
        return 0;
 }
 
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 {
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+       struct qeth_buffer_pool_entry *entry, *tmp;
+       int delta = count - pool->buf_count;
+       LIST_HEAD(entries);
+
        QETH_CARD_TEXT(card, 2, "realcbp");
 
-       /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
-       qeth_clear_working_pool_list(card);
-       qeth_free_buffer_pool(card);
-       card->qdio.in_buf_pool.buf_count = bufcnt;
-       card->qdio.init_pool.buf_count = bufcnt;
-       return qeth_alloc_buffer_pool(card);
+       /* Defer until queue is allocated: */
+       if (!card->qdio.in_q)
+               goto out;
+
+       /* Remove entries from the pool: */
+       while (delta < 0) {
+               entry = list_first_entry(&pool->entry_list,
+                                        struct qeth_buffer_pool_entry,
+                                        init_list);
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+
+               delta++;
+       }
+
+       /* Allocate additional entries: */
+       while (delta > 0) {
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
+                       list_for_each_entry_safe(entry, tmp, &entries,
+                                                init_list) {
+                               list_del(&entry->init_list);
+                               qeth_free_pool_entry(entry);
+                       }
+
+                       return -ENOMEM;
+               }
+
+               list_add(&entry->init_list, &entries);
+
+               delta--;
+       }
+
+       list_splice(&entries, &pool->entry_list);
+
+out:
+       card->qdio.in_buf_pool.buf_count = count;
+       pool->buf_count = count;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 
 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 {
@@ -1170,19 +1241,6 @@ void qeth_drain_output_queues(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
-static void qeth_free_buffer_pool(struct qeth_card *card)
-{
-       struct qeth_buffer_pool_entry *pool_entry, *tmp;
-       int i = 0;
-       list_for_each_entry_safe(pool_entry, tmp,
-                                &card->qdio.init_pool.entry_list, init_list){
-               for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
-                       free_page((unsigned long)pool_entry->elements[i]);
-               list_del(&pool_entry->init_list);
-               kfree(pool_entry);
-       }
-}
-
 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
        unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1204,7 +1262,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
        if (count == 1)
                dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
 
-       card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
        card->qdio.no_out_queues = count;
        return 0;
 }
@@ -2393,7 +2450,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
                return;
 
        qeth_free_cq(card);
-       cancel_delayed_work_sync(&card->buffer_reclaim_work);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (card->qdio.in_q->bufs[j].rx_skb)
                        dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
@@ -2575,7 +2631,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        struct list_head *plh;
        struct qeth_buffer_pool_entry *entry;
        int i, free;
-       struct page *page;
 
        if (list_empty(&card->qdio.in_buf_pool.entry_list))
                return NULL;
@@ -2584,7 +2639,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
                entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
                free = 1;
                for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-                       if (page_count(virt_to_page(entry->elements[i])) > 1) {
+                       if (page_count(entry->elements[i]) > 1) {
                                free = 0;
                                break;
                        }
@@ -2599,15 +2654,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
                        struct qeth_buffer_pool_entry, list);
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-               if (page_count(virt_to_page(entry->elements[i])) > 1) {
-                       page = alloc_page(GFP_ATOMIC);
-                       if (!page) {
+               if (page_count(entry->elements[i]) > 1) {
+                       struct page *page = alloc_page(GFP_ATOMIC);
+
+                       if (!page)
                                return NULL;
-                       } else {
-                               free_page((unsigned long)entry->elements[i]);
-                               entry->elements[i] = page_address(page);
-                               QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
-                       }
+
+                       __free_page(entry->elements[i]);
+                       entry->elements[i] = page;
+                       QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
                }
        }
        list_del_init(&entry->list);
@@ -2625,12 +2680,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
                                               ETH_HLEN +
                                               sizeof(struct ipv6hdr));
                if (!buf->rx_skb)
-                       return 1;
+                       return -ENOMEM;
        }
 
        pool_entry = qeth_find_free_buffer_pool_entry(card);
        if (!pool_entry)
-               return 1;
+               return -ENOBUFS;
 
        /*
         * since the buffer is accessed only from the input_tasklet
@@ -2643,7 +2698,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
                buf->buffer->element[i].length = PAGE_SIZE;
                buf->buffer->element[i].addr =
-                       virt_to_phys(pool_entry->elements[i]);
+                       page_to_phys(pool_entry->elements[i]);
                if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
                        buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
                else
@@ -2675,10 +2730,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
        /* inbound queue */
        qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        memset(&card->rx, 0, sizeof(struct qeth_rx));
+
        qeth_initialize_working_pool_list(card);
        /*give only as many buffers to hardware as we have buffer pool entries*/
-       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
-               qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+               rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+               if (rc)
+                       return rc;
+       }
+
        card->qdio.in_q->next_buf_to_init =
                card->qdio.in_buf_pool.buf_count - 1;
        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
index 2bd9993aa60b8c89546331a25c6013b43646def6..78cae61bc924cb958eed75d041d698b51403e69b 100644 (file)
@@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       unsigned int cnt;
        char *tmp;
-       int cnt, old_cnt;
        int rc = 0;
 
        mutex_lock(&card->conf_mutex);
@@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                goto out;
        }
 
-       old_cnt = card->qdio.in_buf_pool.buf_count;
        cnt = simple_strtoul(buf, &tmp, 10);
        cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
                ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-       if (old_cnt != cnt) {
-               rc = qeth_realloc_buffer_pool(card, cnt);
-       }
+
+       rc = qeth_resize_buffer_pool(card, cnt);
+
 out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
index 9972d96820f3ffbf97513419a19397f75631e8bf..8fb29371788b5cedc5356a3f1c81cce1b1163065 100644 (file)
@@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
        if (card->state == CARD_STATE_SOFTSETUP) {
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 317d56647a4a3b8965b155be0fcfc83b0bdc157e..82f800d1d7b3aacb54099b53160ea0a6c06b803f 100644 (file)
@@ -1178,6 +1178,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
                qeth_l3_clear_ip_htable(card, 1);
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 29f2517d2a31b325928e10324215ca85680263fd..a3d1c3bdfadb17d033ec23fcc1aa3367aa4437fc 100644 (file)
@@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
                qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
                if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
                        card->options.sniffer = i;
-                       if (card->qdio.init_pool.buf_count !=
-                                       QETH_IN_BUF_COUNT_MAX)
-                               qeth_realloc_buffer_pool(card,
-                                       QETH_IN_BUF_COUNT_MAX);
-               } else
+                       qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
+               } else {
                        rc = -EPERM;
+               }
+
                break;
        default:
                rc = -EINVAL;
index ae45cbe98ae23957306c700ae0a6e1e1de05493c..cd8db134987153572ca6c3636f02a207297aa0d5 100644 (file)
@@ -9950,6 +9950,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        ioa_cfg->max_devs_supported = ipr_max_devs;
 
        if (ioa_cfg->sis64) {
+               host->max_channel = IPR_MAX_SIS64_BUSES;
                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
@@ -9958,6 +9959,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                           + ((sizeof(struct ipr_config_table_entry64)
                                               * ioa_cfg->max_devs_supported)));
        } else {
+               host->max_channel = IPR_VSET_BUS;
                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
@@ -9967,7 +9969,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                               * ioa_cfg->max_devs_supported)));
        }
 
-       host->max_channel = IPR_VSET_BUS;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
index a67baeb36d1f7e942149427b349f5d8cd456e843..b97aa9ac2ffe5e09645850b30d5b0ac3d70740cb 100644 (file)
@@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
 #define IPR_ARRAY_VIRTUAL_BUS                  0x1
 #define IPR_VSET_VIRTUAL_BUS                   0x2
 #define IPR_IOAFP_VIRTUAL_BUS                  0x3
+#define IPR_MAX_SIS64_BUSES                    0x4
 
 #define IPR_GET_RES_PHYS_LOC(res) \
        (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
index abd0e6b05f7913a9c3ba3dbac66a57abb078f74d..2d705694636c235c71534cdae3b13eb17bd4a6a8 100644 (file)
@@ -3884,18 +3884,25 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
 {
        unsigned long flags;
+       bool update = false;
 
-       if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+       if (!ufshcd_is_auto_hibern8_supported(hba))
                return;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ahit == ahit)
-               goto out_unlock;
-       hba->ahit = ahit;
-       if (!pm_runtime_suspended(hba->dev))
-               ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+       if (hba->ahit != ahit) {
+               hba->ahit = ahit;
+               update = true;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (update && !pm_runtime_suspended(hba->dev)) {
+               pm_runtime_get_sync(hba->dev);
+               ufshcd_hold(hba, false);
+               ufshcd_auto_hibern8_enable(hba);
+               ufshcd_release(hba);
+               pm_runtime_put(hba->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
 
index 7bfe365d93720c8b9c4ebb1809e1e1adc7c5f9f8..341458fd95ca4896e9eb4bd708473dc1df1ff012 100644 (file)
@@ -959,8 +959,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
        iput(vb->vb_dev_info.inode);
 out_kern_unmount:
        kern_unmount(balloon_mnt);
-#endif
 out_del_vqs:
+#endif
        vdev->config->del_vqs(vdev);
 out_free_vb:
        kfree(vb);
index 867c7ebd3f107a500b6db20211428feab78fda82..58b96baa8d488a94da858688147c5f9dd6ae4d30 100644 (file)
@@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                                         vq->split.queue_size_in_bytes,
                                         vq->split.vring.desc,
                                         vq->split.queue_dma_addr);
-
-                       kfree(vq->split.desc_state);
                }
        }
+       if (!vq->packed_ring)
+               kfree(vq->split.desc_state);
        list_del(&_vq->list);
        kfree(vq);
 }
index 0f7373ba10d5be065811e429fce8b93f71a2ef4f..69e92e692ae01eacb11c39a9bc45351bbd4768c6 100644 (file)
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* iTCO Vendor Specific Support hooks */
 #ifdef CONFIG_ITCO_VENDOR_SUPPORT
+extern int iTCO_vendorsupport;
 extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
 extern void iTCO_vendor_pre_stop(struct resource *);
 extern int iTCO_vendor_check_noreboot_on(void);
 #else
+#define iTCO_vendorsupport                             0
 #define iTCO_vendor_pre_start(acpibase, heartbeat)     {}
 #define iTCO_vendor_pre_stop(acpibase)                 {}
 #define iTCO_vendor_check_noreboot_on()                        1
index 4f1b96f59349666ca78b1e9e7a6253ed6c9acc60..cf0eaa04b064d0fea561873fecf3d3b69574acf1 100644 (file)
 /* Broken BIOS */
 #define BROKEN_BIOS            911
 
-static int vendorsupport;
-module_param(vendorsupport, int, 0);
+int iTCO_vendorsupport;
+EXPORT_SYMBOL(iTCO_vendorsupport);
+
+module_param_named(vendorsupport, iTCO_vendorsupport, int, 0);
 MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
                        "0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS");
 
@@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires)
 void iTCO_vendor_pre_start(struct resource *smires,
                           unsigned int heartbeat)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_start(smires);
                break;
@@ -165,7 +167,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_start);
 
 void iTCO_vendor_pre_stop(struct resource *smires)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_stop(smires);
                break;
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
 int iTCO_vendor_check_noreboot_on(void)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                return 0;
        default:
@@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
 
 static int __init iTCO_vendor_init_module(void)
 {
-       if (vendorsupport == SUPERMICRO_NEW_BOARD) {
+       if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) {
                pr_warn("Option vendorsupport=%d is no longer supported, "
                        "please use the w83627hf_wdt driver instead\n",
                        SUPERMICRO_NEW_BOARD);
                return -EINVAL;
        }
-       pr_info("vendor-support=%d\n", vendorsupport);
+       pr_info("vendor-support=%d\n", iTCO_vendorsupport);
        return 0;
 }
 
index 156360e37714af6f3f7dd9b590222db5253cc9ff..e707c4797f76e57d8e89eb9d5249728c409a58ad 100644 (file)
@@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        if (!p->tco_res)
                return -ENODEV;
 
-       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
-       if (!p->smi_res)
-               return -ENODEV;
-
        p->iTCO_version = pdata->version;
        p->pci_dev = to_pci_dev(dev->parent);
 
+       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
+       if (p->smi_res) {
+               /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
+               if (!devm_request_region(dev, p->smi_res->start,
+                                        resource_size(p->smi_res),
+                                        pdev->name)) {
+                       pr_err("I/O address 0x%04llx already in use, device disabled\n",
+                              (u64)SMI_EN(p));
+                       return -EBUSY;
+               }
+       } else if (iTCO_vendorsupport ||
+                  turn_SMI_watchdog_clear_off >= p->iTCO_version) {
+               pr_err("SMI I/O resource is missing\n");
+               return -ENODEV;
+       }
+
        iTCO_wdt_no_reboot_bit_setup(p, pdata);
 
        /*
@@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
        p->update_no_reboot_bit(p->no_reboot_priv, true);
 
-       /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
-       if (!devm_request_region(dev, p->smi_res->start,
-                                resource_size(p->smi_res),
-                                pdev->name)) {
-               pr_err("I/O address 0x%04llx already in use, device disabled\n",
-                      (u64)SMI_EN(p));
-               return -EBUSY;
-       }
        if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
                /*
                 * Bit 13: TCO_EN -> 0
index df415c05939e7ad4f5d7716d68e4f2bead7eecf7..de1ae0bead3baf9192bde373287924c3b0d500fd 100644 (file)
@@ -19,7 +19,7 @@
 void afs_put_addrlist(struct afs_addr_list *alist)
 {
        if (alist && refcount_dec_and_test(&alist->usage))
-               call_rcu(&alist->rcu, (rcu_callback_t)kfree);
+               kfree_rcu(alist, rcu);
 }
 
 /*
index 1d81fc4c3058c328fcbdbe960dafd1a553b26661..35f951ac296f47edbf04dbdd847270b61b757288 100644 (file)
@@ -81,7 +81,7 @@ enum afs_call_state {
  * List of server addresses.
  */
 struct afs_addr_list {
-       struct rcu_head         rcu;            /* Must be first */
+       struct rcu_head         rcu;
        refcount_t              usage;
        u32                     version;        /* Version */
        unsigned char           max_addrs;
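Both hunks above rely on the same property of kfree_rcu(): the macro takes the name of the rcu_head member and computes its offset itself, so the head no longer needs to be the first field and no function-pointer cast of kfree() is required. A minimal, hypothetical usage sketch (names are illustrative, not from the patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* hypothetical object, not from the patch */
struct example_obj {
	int value;
	struct rcu_head rcu;	/* may sit anywhere in the struct */
};

static void example_put(struct example_obj *obj)
{
	/* queues kfree(obj) for after the current RCU grace period */
	kfree_rcu(obj, rcu);
}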
index 0ef099442f209c779a1b2e3588cbaade5cf173c3..36e7b2fd2190b32462eba9c65c424408a15d0fe6 100644 (file)
@@ -555,7 +555,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
-               fput(file);
                rc = -ENOMEM;
        }
 
index 65cb09fa6ead922e87f62a9c20d0e04372f583f2..08c9f216a54dda778edd44152cfabe0385539ef5 100644 (file)
@@ -538,6 +538,15 @@ int fscrypt_drop_inode(struct inode *inode)
                return 0;
        mk = ci->ci_master_key->payload.data[0];
 
+       /*
+        * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
+        * protected by the key were cleaned by sync_filesystem().  But if
+        * userspace is still using the files, inodes can be dirtied between
+        * then and now.  We mustn't lose any writes, so skip dirty inodes here.
+        */
+       if (inode->i_state & I_DIRTY_ALL)
+               return 0;
+
        /*
         * Note: since we aren't holding ->mk_secret_sem, the result here can
         * immediately become outdated.  But there's no correctness problem with
index 8e02d76fe104aa4438f2076d7450d17683fc636f..97eec7522bf203a929cefd7e0afd67fdd87cd7cb 100644 (file)
@@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
 
-       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
@@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
                wake_up(&req->waitq);
        }
 
-       if (async)
+       if (test_bit(FR_ASYNC, &req->flags))
                req->args->end(fc, req->args, req->out.h.error);
 put_request:
        fuse_put_request(fc, req);
@@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
        req->in.h.opcode = args->opcode;
        req->in.h.nodeid = args->nodeid;
        req->args = args;
+       if (args->end)
+               __set_bit(FR_ASYNC, &req->flags);
 }
 
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
index aa75e2305b7587a8010c3aef405b44aa777f3ae3..ca344bf714045a9408f8a4f9bbcab532e7b7269c 100644 (file)
@@ -301,6 +301,7 @@ struct fuse_io_priv {
  * FR_SENT:            request is in userspace, waiting for an answer
  * FR_FINISHED:                request is finished
  * FR_PRIVATE:         request is on private list
+ * FR_ASYNC:           request is asynchronous
  */
 enum fuse_req_flag {
        FR_ISREPLY,
@@ -314,6 +315,7 @@ enum fuse_req_flag {
        FR_SENT,
        FR_FINISHED,
        FR_PRIVATE,
+       FR_ASYNC,
 };
 
 /**
index 2716d56ed0a07e537935c371e19c21c8f986670a..8294851a9dd99acbd548579a38f5cab5745bafd8 100644 (file)
@@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
                if (!(file->f_mode & FMODE_OPENED))
                        return finish_no_open(file, d);
                dput(d);
-               return 0;
+               return excl && (flags & O_CREAT) ? -EEXIST : 0;
        }
 
        BUG_ON(d != NULL);
index 7d57068b6b7aedbc3ddde2b91179853b7e8c6f43..93d9252a00ab4b2ed0fbb274b28e1df468aa71ca 100644 (file)
@@ -138,6 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
+       atomic64_set(&inode->i_sequence, 0);
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
index c06082bb039ac890e9fd84b095b6f5a62700fff1..1b2517291b7870ebb49c3999d9a19c62b6f8bb25 100644 (file)
@@ -191,7 +191,6 @@ struct fixed_file_data {
        struct llist_head               put_llist;
        struct work_struct              ref_work;
        struct completion               done;
-       struct rcu_head                 rcu;
 };
 
 struct io_ring_ctx {
@@ -5331,24 +5330,21 @@ static void io_file_ref_kill(struct percpu_ref *ref)
        complete(&data->done);
 }
 
-static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
+static void io_file_ref_exit_and_free(struct work_struct *work)
 {
-       struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
-                                                       rcu);
-       percpu_ref_exit(&data->refs);
-       kfree(data);
-}
+       struct fixed_file_data *data;
+
+       data = container_of(work, struct fixed_file_data, ref_work);
 
-static void io_file_ref_exit_and_free(struct rcu_head *rcu)
-{
        /*
-        * We need to order our exit+free call against the potentially
-        * existing call_rcu() for switching to atomic. One way to do that
-        * is to have this rcu callback queue the final put and free, as we
-        * could otherwise have a pre-existing atomic switch complete _after_
-        * the free callback we queued.
+        * Ensure any percpu-ref atomic switch callback has run, it could have
+        * been in progress when the files were being unregistered. Once
+        * that's done, we can safely exit and free the ref and containing
+        * data structure.
         */
-       call_rcu(rcu, __io_file_ref_exit_and_free);
+       rcu_barrier();
+       percpu_ref_exit(&data->refs);
+       kfree(data);
 }
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -5369,7 +5365,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        for (i = 0; i < nr_tables; i++)
                kfree(data->table[i].files);
        kfree(data->table);
-       call_rcu(&data->rcu, io_file_ref_exit_and_free);
+       INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
+       queue_work(system_wq, &data->ref_work);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
        return 0;
index 989c30c98511d9d8404803803e4d1a9b7a462a9a..f1ff3076e4a461d80830103160515576685fee33 100644 (file)
@@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
        if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
                goto error_0;
 
+       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_nfs_mod = cl_init->nfs_mod;
        if (!try_module_get(clp->cl_nfs_mod->owner))
                goto error_dealloc;
index e1b938457ab9f528d61f44f8d6e83dd45731ce21..e113fcb4bb4c4b7cdfe704cf568ac27430a6b9b3 100644 (file)
@@ -832,6 +832,8 @@ static int nfs_parse_source(struct fs_context *fc,
        if (len > maxnamlen)
                goto out_hostname;
 
+       kfree(ctx->nfs_server.hostname);
+
        /* N.B. caller will free nfs_server.hostname in all cases */
        ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
        if (!ctx->nfs_server.hostname)
@@ -1240,6 +1242,13 @@ static int nfs_fs_context_validate(struct fs_context *fc)
                }
                ctx->nfs_mod = nfs_mod;
        }
+
+       /* Ensure the filesystem context has the correct fs_type */
+       if (fc->fs_type != ctx->nfs_mod->nfs_fs) {
+               module_put(fc->fs_type->owner);
+               __module_get(ctx->nfs_mod->nfs_fs->owner);
+               fc->fs_type = ctx->nfs_mod->nfs_fs;
+       }
        return 0;
 
 out_no_device_name:
index 52270bfac120b99f3464af019c0ccc74078b8b92..1abf126c2df45b7e6d5815745db756db6b4659d0 100644 (file)
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
 struct nfs_server_key {
        struct {
                uint16_t        nfsversion;             /* NFS protocol version */
+               uint32_t        minorversion;           /* NFSv4 minor version */
                uint16_t        family;                 /* address family */
                __be16          port;                   /* IP port */
        } hdr;
@@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
 
        memset(&key, 0, sizeof(key));
        key.hdr.nfsversion = clp->rpc_ops->version;
+       key.hdr.minorversion = clp->cl_minorversion;
        key.hdr.family = clp->cl_addr.ss_family;
 
        switch (clp->cl_addr.ss_family) {
index ad60774049473d3f600267282c275f217aa68510..f3ece8ed32033ab3e4e4e807b9b128b54c1da95a 100644 (file)
@@ -153,7 +153,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
        /* Open a new filesystem context, transferring parameters from the
         * parent superblock, including the network namespace.
         */
-       fc = fs_context_for_submount(&nfs_fs_type, path->dentry);
+       fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
        if (IS_ERR(fc))
                return ERR_CAST(fc);
 
index 0cd767e5c9775437aebb7e263de46e4874b7f0de..0bd77cc1f639fa27eb5ffbe0a59081285adad215 100644 (file)
@@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
        INIT_LIST_HEAD(&clp->cl_ds_clients);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
-       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
        clp->cl_mig_gen = 1;
 #if IS_ENABLED(CONFIG_NFS_V4_1)
index 0788b3715731186f326a24296ed3b00ad870bb46..b69d6eed67e6a93753cce13080ebb2208dee3a0b 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -860,9 +860,6 @@ static int do_dentry_open(struct file *f,
  * the return value of d_splice_alias(), then the caller needs to perform dput()
  * on it after finish_open().
  *
- * On successful return @file is a fully instantiated open file.  After this, if
- * an error occurs in ->atomic_open(), it needs to clean up with fput().
- *
  * Returns zero on success or -errno if the open failed.
  */
 int finish_open(struct file *file, struct dentry *dentry,
index 444e2da4f60e213c28109204fdd997dab4b863a3..714c14c47ca5573053b43fb605c69761d2157562 100644 (file)
@@ -93,6 +93,7 @@ config OVERLAY_FS_XINO_AUTO
        bool "Overlayfs: auto enable inode number mapping"
        default n
        depends on OVERLAY_FS
+       depends on 64BIT
        help
          If this config option is enabled then overlay filesystems will use
          unused high bits in undelying filesystem inode numbers to map all
index a5317216de7368f30a15a49a93a3cbfabe7eb71d..87c362f65448b956f5ca425ac40f728dae408dab 100644 (file)
@@ -244,6 +244,9 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
        if (iocb->ki_flags & IOCB_WRITE) {
                struct inode *inode = file_inode(orig_iocb->ki_filp);
 
+               /* Actually acquired in ovl_write_iter() */
+               __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+                                     SB_FREEZE_WRITE);
                file_end_write(iocb->ki_filp);
                ovl_copyattr(ovl_inode_real(inode), inode);
        }
@@ -346,6 +349,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                        goto out;
 
                file_start_write(real.file);
+               /* Pacify lockdep, same trick as done in aio_write() */
+               __sb_writers_release(file_inode(real.file)->i_sb,
+                                    SB_FREEZE_WRITE);
                aio_req->fd = real;
                real.flags = 0;
                aio_req->orig_iocb = iocb;
index 3623d28aa4fa7d93252f456468f14af5648b80d3..3d3f2b8bdae54fb27aa25f5f15bbef0df5aa384d 100644 (file)
@@ -318,7 +318,12 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
        return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
 }
 
-static inline int ovl_inode_lock(struct inode *inode)
+static inline void ovl_inode_lock(struct inode *inode)
+{
+       mutex_lock(&OVL_I(inode)->lock);
+}
+
+static inline int ovl_inode_lock_interruptible(struct inode *inode)
 {
        return mutex_lock_interruptible(&OVL_I(inode)->lock);
 }
index 319fe0d355b0b33b5cda8f3227f17a09e8de7049..ac967f1cb6e505725515863395d273122e013129 100644 (file)
@@ -1411,6 +1411,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
                if (ofs->config.xino == OVL_XINO_ON)
                        pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
                ofs->xino_mode = 0;
+       } else if (ofs->config.xino == OVL_XINO_OFF) {
+               ofs->xino_mode = -1;
        } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
                /*
                 * This is a roundup of number of bits needed for encoding
@@ -1623,8 +1625,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_stack_depth = 0;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        /* Assume underlaying fs uses 32bit inodes unless proven otherwise */
-       if (ofs->config.xino != OVL_XINO_OFF)
+       if (ofs->config.xino != OVL_XINO_OFF) {
                ofs->xino_mode = BITS_PER_LONG - 32;
+               if (!ofs->xino_mode) {
+                       pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n");
+                       ofs->config.xino = OVL_XINO_OFF;
+               }
+       }
 
        /* alloc/destroy_inode needed for setting up traps in inode cache */
        sb->s_op = &ovl_super_operations;
index ea005085803f0c5a6eb54ae19f8c6b19d2089c38..042f7eb4f7f419644ec6f4c0f5b4ddae23650a33 100644 (file)
@@ -509,7 +509,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
        struct inode *inode = d_inode(dentry);
        int err;
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (!err && ovl_already_copied_up_locked(dentry, flags)) {
                err = 1; /* Already copied up */
                ovl_inode_unlock(inode);
@@ -764,7 +764,7 @@ int ovl_nlink_start(struct dentry *dentry)
                        return err;
        }
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (err)
                return err;
 
index 4e6dc840b1592c53cb38b3687a2ff3b55fa8364f..9ecb3c1f0f15d4bc7ebe34f5682fbba7b780ee1f 100644 (file)
@@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
                             const u8 secret[CURVE25519_KEY_SIZE],
                             const u8 basepoint[CURVE25519_KEY_SIZE])
 {
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_arch(mypublic, secret, basepoint);
        else
                curve25519_generic(mypublic, secret, basepoint);
@@ -49,7 +50,8 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
                                    CURVE25519_KEY_SIZE)))
                return false;
 
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_base_arch(pub, secret);
        else
                curve25519_generic(pub, secret, curve25519_base_point);
index 0f2b8423ce1d13c9bf35a7b18b55335f82bd31ed..65ac6eb6c7330615457f5d13cacbedadf681d55c 100644 (file)
 #define IMX8MN_CLK_I2C1                                105
 #define IMX8MN_CLK_I2C2                                106
 #define IMX8MN_CLK_I2C3                                107
-#define IMX8MN_CLK_I2C4                                118
-#define IMX8MN_CLK_UART1                       119
+#define IMX8MN_CLK_I2C4                                108
+#define IMX8MN_CLK_UART1                       109
 #define IMX8MN_CLK_UART2                       110
 #define IMX8MN_CLK_UART3                       111
 #define IMX8MN_CLK_UART4                       112
index d7ddebd0cdec2e2eef29b24cce984fd5ef93e9e6..e75d2191226b9144b1eb102f6679bf6817a3005f 100644 (file)
@@ -62,6 +62,7 @@ struct css_task_iter {
        struct list_head                *mg_tasks_head;
        struct list_head                *dying_tasks_head;
 
+       struct list_head                *cur_tasks_head;
        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
        struct task_struct              *cur_task;
index f64ca27dc210469a9da89ac4083bd0532e2abf29..d7bf029df737d3b52a93f490b2d3109f3c3247a8 100644 (file)
@@ -69,19 +69,23 @@ struct dmar_pci_notify_info {
 extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
 
-#define for_each_drhd_unit(drhd) \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd)                                       \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())
 
 #define for_each_active_drhd_unit(drhd)                                        \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (drhd->ignored) {} else
 
 #define for_each_active_iommu(i, drhd)                                 \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, drhd->ignored) {} else
 
 #define for_each_iommu(i, drhd)                                                \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, 0) {} else 
 
 static inline bool dmar_rcu_check(void)
index 3cd4fe6b845e7ff8c6263488030bd3553c302850..abedbffe2c9e41e2c4711592bd2776e3d9dcbb77 100644 (file)
@@ -698,6 +698,7 @@ struct inode {
                struct rcu_head         i_rcu;
        };
        atomic64_t              i_version;
+       atomic64_t              i_sequence; /* see futex */
        atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
index 5cc3fed27d4c2e142d2edf1d582785c5902561e1..b70df27d7e85c2c257c3467780d7b835b58d9fe1 100644 (file)
@@ -31,23 +31,26 @@ struct task_struct;
 
 union futex_key {
        struct {
+               u64 i_seq;
                unsigned long pgoff;
-               struct inode *inode;
-               int offset;
+               unsigned int offset;
        } shared;
        struct {
+               union {
+                       struct mm_struct *mm;
+                       u64 __tmp;
+               };
                unsigned long address;
-               struct mm_struct *mm;
-               int offset;
+               unsigned int offset;
        } private;
        struct {
+               u64 ptr;
                unsigned long word;
-               void *ptr;
-               int offset;
+               unsigned int offset;
        } both;
 };
 
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
 
 #ifdef CONFIG_FUTEX
 enum {
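
The union rework above gives all three views of the key the same shape, a u64 followed by an unsigned long and an unsigned int offset, so the key can be hashed as one flat prefix no matter which variant filled it in (see the jhash2()/offsetof() change to hash_futex() in kernel/futex.c further down). A small userspace sketch of hashing a union's common prefix that way; the layout and names are illustrative, and hash32() is a toy mixer standing in for jhash2():

/* Hash a union up to a common trailing member. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union key {
	struct { uint64_t i_seq; unsigned long pgoff;   unsigned int offset; } shared;
	struct { uint64_t mm;    unsigned long address; unsigned int offset; } priv;
	struct { uint64_t ptr;   unsigned long word;    unsigned int offset; } both;
};

static uint32_t hash32(const uint32_t *w, size_t nwords, uint32_t seed)
{
	uint32_t h = seed;

	while (nwords--)
		h = h * 31 + *w++;	/* toy mixer, not jhash2() */
	return h;
}

static uint32_t hash_key(const union key *key)
{
	/* Hash everything up to (but not including) both.offset, then
	 * fold the offset in as the seed -- same shape as hash_futex().
	 */
	uint32_t words[offsetof(union key, both.offset) / 4];

	memcpy(words, key, sizeof(words));
	return hash32(words, sizeof(words) / sizeof(words[0]), key->both.offset);
}

int main(void)
{
	union key k = { .shared = { .i_seq = 42, .pgoff = 7, .offset = 12 } };

	printf("bucket hash: %u\n", hash_key(&k));
	return 0;
}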
index 6fbe58538ad6303fed1aa86898d8b5d9dcc82f8d..07dc91835b9891f41f6f098b8d65ae91d2cc9c16 100644 (file)
@@ -245,18 +245,6 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
                !(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
-static inline bool disk_has_partitions(struct gendisk *disk)
-{
-       bool ret = false;
-
-       rcu_read_lock();
-       if (rcu_dereference(disk->part_tbl)->len > 1)
-               ret = true;
-       rcu_read_unlock();
-
-       return ret;
-}
-
 static inline dev_t disk_devt(struct gendisk *disk)
 {
        return MKDEV(disk->major, disk->first_minor);
@@ -298,6 +286,7 @@ extern void disk_part_iter_exit(struct disk_part_iter *piter);
 
 extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
                                             sector_t sector);
+bool disk_has_partitions(struct gendisk *disk);
 
 /*
  * Macros to operate on percpu disk statistics:
index 39faaaf843e1efb864ad148c710ed9cec7a82381..c91cf2dee12abd9fe4225c7000dcc2c25fbef0ed 100644 (file)
@@ -2,15 +2,10 @@
 #ifndef _INET_DIAG_H_
 #define _INET_DIAG_H_ 1
 
+#include <net/netlink.h>
 #include <uapi/linux/inet_diag.h>
 
-struct net;
-struct sock;
 struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
 
 struct inet_diag_handler {
        void            (*dump)(struct sk_buff *skb,
@@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
 
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+       return    nla_total_size(1)  /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1)  /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+               + nla_total_size(1)  /* INET_DIAG_TCLASS */
+               + nla_total_size(1)  /* INET_DIAG_SKV6ONLY */
+#endif
+               + nla_total_size(4)  /* INET_DIAG_MARK */
+               + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+}
 int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
                             struct inet_diag_msg *r, int ext,
                             struct user_namespace *user_ns, bool net_admin);
index 4a16b39ae353ee8be6c908a597575c33aaa0cc25..980234ae0312259e938f0dc5c63dc4209fa7aed8 100644 (file)
 
 #define dmar_readq(a) readq(a)
 #define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
 
 #define DMAR_VER_MAJOR(v)              (((v) & 0xf0) >> 4)
 #define DMAR_VER_MINOR(v)              ((v) & 0x0f)
index ba703384bea0c8d4fa2b37717b8656f7e9a04227..4c5eb3aa8e723e8368ed2f9737b460d31224b6e4 100644 (file)
@@ -333,6 +333,7 @@ struct mmc_host {
                                 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
                                 MMC_CAP_UHS_DDR50)
 #define MMC_CAP_SYNC_RUNTIME_PM        (1 << 21)       /* Synced runtime PM suspends. */
+#define MMC_CAP_NEED_RSP_BUSY  (1 << 22)       /* Commands with R1B can't use R1. */
 #define MMC_CAP_DRIVER_TYPE_A  (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C  (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D  (1 << 25)       /* Host supports Driver Type D */
index c86fcad23fc21725a14a98d45adc006b38c789ec..31b73a0da9db33e8a61ae7e30b8d6fe5830d8272 100644 (file)
@@ -11,17 +11,17 @@ struct of_device_id;
 
 #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
 
-unsigned int of_clk_get_parent_count(struct device_node *np);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
+unsigned int of_clk_get_parent_count(const struct device_node *np);
+const char *of_clk_get_parent_name(const struct device_node *np, int index);
 void of_clk_init(const struct of_device_id *matches);
 
 #else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
 
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        return 0;
 }
-static inline const char *of_clk_get_parent_name(struct device_node *np,
+static inline const char *of_clk_get_parent_name(const struct device_node *np,
                                                 int index)
 {
        return NULL;
index c570e162e05e58745d613325b0d93a6bbabf3ace..452e8ba8665f1078f5777e38a3acb293483c49df 100644 (file)
@@ -357,6 +357,7 @@ struct macsec_ops;
  * is_gigabit_capable: Set to true if PHY supports 1000Mbps
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
  * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
  * loopback_enabled: Set true if this phy has been loopbacked successfully.
  * state: state of the PHY for management purposes
@@ -396,6 +397,7 @@ struct phy_device {
        unsigned is_gigabit_capable:1;
        unsigned has_fixups:1;
        unsigned suspended:1;
+       unsigned suspended_by_mdio_bus:1;
        unsigned sysfs_links:1;
        unsigned loopback_enabled:1;
 
@@ -557,6 +559,7 @@ struct phy_driver {
        /*
         * Checks if the PHY generated an interrupt.
         * For multi-PHY devices with shared PHY interrupt pin
+        * the set interrupt bits have to be cleared.
         */
        int (*did_interrupt)(struct phy_device *phydev);
 
index 276a03c246919ee57be5495808a4043c7abdcaac..041bfa412aa09521813c057fbfc3f273dfda062b 100644 (file)
@@ -24,7 +24,7 @@ struct platform_device {
        int             id;
        bool            id_auto;
        struct device   dev;
-       u64             dma_mask;
+       u64             platform_dma_mask;
        u32             num_resources;
        struct resource *resource;
 
index beb9a9da16994cac25b2d564c9aaaf75376efc66..70ebef866cc82ada0c6c1f69cc2a57e23cb732ba 100644 (file)
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
 /**
  * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
  * @ht:                hash table
+ * @key:       key
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
- * @data:      pointer to element data already in hashes
  *
  * Just like rhashtable_lookup_insert_key(), but this function returns the
  * object if it exists, NULL if it does not and the insertion was successful,
index 4261d1c6e87b1a24fafa650dab1c1092cdbcd926..e48554e6526c0f09dd2f2c0d89477b851d8b6f05 100644 (file)
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties:  If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0                              CPU1
+ *
+ *   WRITE_ONCE(x, 1);                 [ @work is being executed ]
+ *   r0 = queue_work(wq, work);                  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
                              struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {
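
The new kernel-doc above documents a release/acquire-style guarantee: every store made before a successful queue_work() is visible to the CPU that executes the work. A userspace C11 sketch of the same litmus test, with an atomic flag standing in for the queueing machinery; this is purely illustrative and not how the workqueue implements the guarantee:

/* Userspace analogue of the documented ordering guarantee. */
#include <stdatomic.h>
#include <pthread.h>
#include <assert.h>
#include <stdio.h>

static int x;				/* plain payload data */
static _Atomic int queued;		/* stands in for the workqueue */

static void *cpu0(void *arg)
{
	(void)arg;
	x = 1;					/* store before "queueing" */
	atomic_store_explicit(&queued, 1, memory_order_release);
	return NULL;
}

static void *cpu1(void *arg)
{
	(void)arg;
	/* "work execution": wait until the item is visible... */
	while (!atomic_load_explicit(&queued, memory_order_acquire))
		;
	/* ...then the preceding store to x must be visible too. */
	assert(x == 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu0, NULL);
	pthread_create(&b, NULL, cpu1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("ordering held");
	return 0;
}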
index 54e227e6b06a78902941ea20be01da76ac7ae08b..a259050f84afcda7076a894226ed268bc116a8f0 100644 (file)
@@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
        [FRA_OIFNAME]   = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
        [FRA_PRIORITY]  = { .type = NLA_U32 }, \
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
+       [FRA_TUN_ID]    = { .type = NLA_U64 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
        [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
index 0a50d53bbd3f8d9f76a2a3b7a0f71865da7147cd..7c08437061fcdb158292328a0b6e17c816f74b16 100644 (file)
@@ -74,7 +74,7 @@
 #define DEV_MAC_TAGS_CFG_TAG_ID_M                         GENMASK(31, 16)
 #define DEV_MAC_TAGS_CFG_TAG_ID_X(x)                      (((x) & GENMASK(31, 16)) >> 16)
 #define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA                 BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA                           BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA                 BIT(1)
 #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA                     BIT(0)
 
 #define DEV_MAC_ADV_CHK_CFG                               0x2c
index 1521073b634800f5ee1c01e08ff86742dab0d2f5..8533bf07450f0b483678a125e272af4c6505d9dc 100644 (file)
@@ -74,6 +74,8 @@ enum {
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
   IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
 #define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_ETHERNET = 143,      /* Ethernet-within-IPv6 Encapsulation   */
+#define IPPROTO_ETHERNET       IPPROTO_ETHERNET
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MPTCP = 262,         /* Multipath TCP connection             */
index be1a1c83cdd1808a16d695ef42370cd667fd5aed..f2d7cea86ffe187788c9247834fe8bcc90f8fc8b 100644 (file)
@@ -471,6 +471,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
         */
        p++;
        if (p >= end) {
+               (*pos)++;
                return NULL;
        } else {
                *pos = *p;
@@ -782,7 +783,7 @@ void cgroup1_release_agent(struct work_struct *work)
 
        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
-       if (!pathbuf || !agentbuf)
+       if (!pathbuf || !agentbuf || !strlen(agentbuf))
                goto out;
 
        spin_lock_irq(&css_set_lock);
index 75f687301bbfdf1d25825370e823c82a53cfe27c..3dead0416b9164f91113f28d921dd8dc5390f3f6 100644 (file)
@@ -3542,21 +3542,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_IO);
 }
 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_MEM);
 }
 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_CPU);
 }
@@ -4400,12 +4400,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
                }
        } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
-       if (!list_empty(&cset->tasks))
+       if (!list_empty(&cset->tasks)) {
                it->task_pos = cset->tasks.next;
-       else if (!list_empty(&cset->mg_tasks))
+               it->cur_tasks_head = &cset->tasks;
+       } else if (!list_empty(&cset->mg_tasks)) {
                it->task_pos = cset->mg_tasks.next;
-       else
+               it->cur_tasks_head = &cset->mg_tasks;
+       } else {
                it->task_pos = cset->dying_tasks.next;
+               it->cur_tasks_head = &cset->dying_tasks;
+       }
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
@@ -4463,10 +4467,14 @@ static void css_task_iter_advance(struct css_task_iter *it)
                else
                        it->task_pos = it->task_pos->next;
 
-               if (it->task_pos == it->tasks_head)
+               if (it->task_pos == it->tasks_head) {
                        it->task_pos = it->mg_tasks_head->next;
-               if (it->task_pos == it->mg_tasks_head)
+                       it->cur_tasks_head = it->mg_tasks_head;
+               }
+               if (it->task_pos == it->mg_tasks_head) {
                        it->task_pos = it->dying_tasks_head->next;
+                       it->cur_tasks_head = it->dying_tasks_head;
+               }
                if (it->task_pos == it->dying_tasks_head)
                        css_task_iter_advance_css_set(it);
        } else {
@@ -4485,11 +4493,12 @@ static void css_task_iter_advance(struct css_task_iter *it)
                        goto repeat;
 
                /* and dying leaders w/o live member threads */
-               if (!atomic_read(&task->signal->live))
+               if (it->cur_tasks_head == it->dying_tasks_head &&
+                   !atomic_read(&task->signal->live))
                        goto repeat;
        } else {
                /* skip all dying ones */
-               if (task->flags & PF_EXITING)
+               if (it->cur_tasks_head == it->dying_tasks_head)
                        goto repeat;
        }
 }
@@ -4595,6 +4604,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
        struct kernfs_open_file *of = s->private;
        struct css_task_iter *it = of->priv;
 
+       if (pos)
+               (*pos)++;
+
        return css_task_iter_next(it);
 }
 
@@ -4610,7 +4622,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
         * from position 0, so we can simply keep iterating on !0 *pos.
         */
        if (!it) {
-               if (WARN_ON_ONCE((*pos)++))
+               if (WARN_ON_ONCE((*pos)))
                        return ERR_PTR(-EINVAL);
 
                it = kzalloc(sizeof(*it), GFP_KERNEL);
@@ -4618,10 +4630,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
                        return ERR_PTR(-ENOMEM);
                of->priv = it;
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       } else if (!(*pos)++) {
+       } else if (!(*pos)) {
                css_task_iter_end(it);
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       }
+       } else
+               return it->cur_task;
 
        return cgroup_procs_next(s, NULL, NULL);
 }
@@ -6258,6 +6271,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
                return;
        }
 
+       /* Don't associate the sock with unrelated interrupted task's cgroup. */
+       if (in_interrupt())
+               return;
+
        rcu_read_lock();
 
        while (true) {
index 0cf84c8664f207c574325b899ef2e57f01295a94..82dfacb3250ea997aa129cf53ffa4312200bd2be 100644 (file)
@@ -385,9 +385,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
 {
-       u32 hash = jhash2((u32*)&key->both.word,
-                         (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+       u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
                          key->both.offset);
+
        return &futex_queues[hash & (futex_hashsize - 1)];
 }
 
@@ -429,7 +429,7 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               ihold(key->shared.inode); /* implies smp_mb(); (B) */
+               smp_mb();               /* explicit smp_mb(); (B) */
                break;
        case FUT_OFF_MMSHARED:
                futex_get_mm(key); /* implies smp_mb(); (B) */
@@ -463,7 +463,6 @@ static void drop_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
@@ -505,6 +504,46 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
        return timeout;
 }
 
+/*
+ * Generate a machine wide unique identifier for this inode.
+ *
+ * This relies on the u64 not wrapping in the lifetime of the machine, which
+ * with 1ns resolution means almost 585 years.
+ *
+ * This further relies on the fact that a well formed program will not unmap
+ * the file while it has a (shared) futex waiting on it. This mapping will have
+ * a file reference which pins the mount and inode.
+ *
+ * If for some reason an inode gets evicted and read back in again, it will get
+ * a new sequence number and will _NOT_ match, even though it is the exact same
+ * file.
+ *
+ * It is important that match_futex() will never have a false-positive, esp.
+ * for PI futexes that can mess up the state. The above argues that false-negatives
+ * are only possible for malformed programs.
+ */
+static u64 get_inode_sequence_number(struct inode *inode)
+{
+       static atomic64_t i_seq;
+       u64 old;
+
+       /* Does the inode already have a sequence number? */
+       old = atomic64_read(&inode->i_sequence);
+       if (likely(old))
+               return old;
+
+       for (;;) {
+               u64 new = atomic64_add_return(1, &i_seq);
+               if (WARN_ON_ONCE(!new))
+                       continue;
+
+               old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
+               if (old)
+                       return old;
+               return new;
+       }
+}
+
 /**
  * get_futex_key() - Get parameters which are the keys for a futex
  * @uaddr:     virtual address of the futex
@@ -517,9 +556,15 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
  *
  * The key words are stored in @key on success.
  *
- * For shared mappings, it's (page->index, file_inode(vma->vm_file),
- * offset_within_page).  For private mappings, it's (uaddr, current->mm).
- * We can usually work out the index without swapping in the page.
+ * For shared mappings (when @fshared), the key is:
+ *   ( inode->i_sequence, page->index, offset_within_page )
+ * [ also see get_inode_sequence_number() ]
+ *
+ * For private mappings (or when !@fshared), the key is:
+ *   ( current->mm, address, 0 )
+ *
+ * This allows (cross process, where applicable) identification of the futex
+ * without keeping the page pinned for the duration of the FUTEX_WAIT.
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
@@ -659,8 +704,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
                key->private.mm = mm;
                key->private.address = address;
 
-               get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
        } else {
                struct inode *inode;
 
@@ -692,40 +735,14 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
                        goto again;
                }
 
-               /*
-                * Take a reference unless it is about to be freed. Previously
-                * this reference was taken by ihold under the page lock
-                * pinning the inode in place so i_lock was unnecessary. The
-                * only way for this check to fail is if the inode was
-                * truncated in parallel which is almost certainly an
-                * application bug. In such a case, just retry.
-                *
-                * We are not calling into get_futex_key_refs() in file-backed
-                * cases, therefore a successful atomic_inc return below will
-                * guarantee that get_futex_key() will still imply smp_mb(); (B).
-                */
-               if (!atomic_inc_not_zero(&inode->i_count)) {
-                       rcu_read_unlock();
-                       put_page(page);
-
-                       goto again;
-               }
-
-               /* Should be impossible but lets be paranoid for now */
-               if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
-                       err = -EFAULT;
-                       rcu_read_unlock();
-                       iput(inode);
-
-                       goto out;
-               }
-
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-               key->shared.inode = inode;
+               key->shared.i_seq = get_inode_sequence_number(inode);
                key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
+       get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
 out:
        put_page(page);
        return err;
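
The futex rework above stops taking inode references and instead keys shared futexes on a lazily assigned, never-reused 64-bit sequence number, installed exactly once with a cmpxchg and with 0 reserved to mean "not assigned yet". A userspace sketch of that lazy unique-ID idiom using C11 atomics; the names are illustrative, not kernel API:

/* Lazy unique-ID pattern: a global counter hands out IDs, each object
 * latches its ID exactly once via compare-and-swap.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct obj {
	_Atomic uint64_t sequence;	/* 0 means "not assigned yet" */
};

static _Atomic uint64_t global_seq;	/* shared counter, starts at 0 */

static uint64_t get_sequence_number(struct obj *o)
{
	uint64_t old = atomic_load(&o->sequence);

	if (old)			/* fast path: already assigned */
		return old;

	for (;;) {
		uint64_t newval = atomic_fetch_add(&global_seq, 1) + 1;
		uint64_t expected = 0;

		if (newval == 0)	/* skip 0, it means "unset" */
			continue;
		/* Only the first caller installs its value; everyone else
		 * observes and returns the winner's ID.
		 */
		if (atomic_compare_exchange_strong(&o->sequence, &expected, newval))
			return newval;
		return expected;
	}
}

int main(void)
{
	struct obj a = { 0 }, b = { 0 };

	printf("a=%llu a=%llu b=%llu\n",
	       (unsigned long long)get_sequence_number(&a),
	       (unsigned long long)get_sequence_number(&a),
	       (unsigned long long)get_sequence_number(&b));
	return 0;
}

Calling it twice on the same object returns the same ID, which is the property the futex key relies on.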
index 0f4ecb57214cb840d9d1d866669865316fd4a380..647b4bb457b5945d16f217ab7ad785cd2b2f84d1 100644 (file)
@@ -247,6 +247,16 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                tmp = tmp->parent;
        }
 
+       /*
+        * ENOMEM is not the most obvious choice especially for the case
+        * where the child subreaper has already exited and the pid
+        * namespace denies the creation of any new processes. But ENOMEM
+        * is what we have exposed to userspace for a long time and it is
+        * documented behavior for pid namespaces. So we can't easily
+        * change it even if there were an error code better suited.
+        */
+       retval = -ENOMEM;
+
        if (unlikely(is_child_reaper(pid))) {
                if (pid_ns_prepare_proc(ns))
                        goto out_free;
index f9bc5c303e3f42be77cb88c5b4a630f765165ee2..d325f3ab624a9a0b41ffd83911807d683f0aba22 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/time_namespace.h>
 #include <linux/binfmts.h>
 
 #include <linux/sched.h>
@@ -2546,6 +2547,7 @@ static int do_sysinfo(struct sysinfo *info)
        memset(info, 0, sizeof(struct sysinfo));
 
        ktime_get_boottime_ts64(&tp);
+       timens_add_boottime(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
index 3f7ee102868a21b2374735c8ea7492004d178ce7..fd81c7de77a7047945452682a6725390a5d2c343 100644 (file)
@@ -1547,6 +1547,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
+               if (rec)
+                       break;
        }
        return rec;
 }
index 301db4406bc37ab1805806573124fe696c41518e..4e01c448b4b48fed8e1f53cbc9970422d796db53 100644 (file)
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                return;
        rcu_read_lock();
 retry:
-       if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
        /* pwq which will be used unless @work is executing elsewhere */
-       if (!(wq->flags & WQ_UNBOUND))
-               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-       else
+       if (wq->flags & WQ_UNBOUND) {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = wq_select_unbound_cpu(raw_smp_processor_id());
                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+       } else {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = raw_smp_processor_id();
+               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+       }
 
        /*
         * If @work was previously on a different pool, it might still be
index d09776cd6e1041d896aa25e726d55f93e46581ed..2058b8da18db3a2cc58da883840273d4390288cc 100644 (file)
@@ -6682,19 +6682,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
        if (!mem_cgroup_sockets_enabled)
                return;
 
-       /*
-        * Socket cloning can throw us here with sk_memcg already
-        * filled. It won't however, necessarily happen from
-        * process context. So the test for root memcg given
-        * the current task's memcg won't help us in this case.
-        *
-        * Respecting the original socket's memcg is a better
-        * decision in this case.
-        */
-       if (sk->sk_memcg) {
-               css_get(&sk->sk_memcg->css);
+       /* Do not associate the sock with unrelated interrupted task's memcg. */
+       if (in_interrupt())
                return;
-       }
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(current);
index f0209505e41ae1a4bba6989dc571906dc46f432d..a7c8dd7ae51314f3fd729484300b0796238a87c8 100644 (file)
@@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 
        lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
+       /* interface already disabled by batadv_iv_ogm_iface_disable */
+       if (!*ogm_buff)
+               return;
+
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() where the originator mac is set and
index 03c7cdd8e4cbfcfcbb9a1c66cae3f6803bde24a9..195d2d67be8a3043c48027a0a6e897660bd74072 100644 (file)
@@ -112,7 +112,8 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
 
-       list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+       list_for_each_entry_rcu(caifd, &caifdevs->list, list,
+                               lockdep_rtnl_is_held()) {
                if (caifd->netdev == dev)
                        return caifd;
        }
index 5e220809844c81c539d5e7506bc4095bd9a48ff4..b831c5545d6a95d049db968418d7d8ff6cb81767 100644 (file)
@@ -3352,34 +3352,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
                                  struct genl_info *info,
                                  union devlink_param_value *value)
 {
+       struct nlattr *param_data;
        int len;
 
-       if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
-           !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+       param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+
+       if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
                return -EINVAL;
 
        switch (param->type) {
        case DEVLINK_PARAM_TYPE_U8:
-               value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u8))
+                       return -EINVAL;
+               value->vu8 = nla_get_u8(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U16:
-               value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u16))
+                       return -EINVAL;
+               value->vu16 = nla_get_u16(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U32:
-               value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u32))
+                       return -EINVAL;
+               value->vu32 = nla_get_u32(param_data);
                break;
        case DEVLINK_PARAM_TYPE_STRING:
-               len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
-                             nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
-               if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+               len = strnlen(nla_data(param_data), nla_len(param_data));
+               if (len == nla_len(param_data) ||
                    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
                        return -EINVAL;
-               strcpy(value->vstr,
-                      nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+               strcpy(value->vstr, nla_data(param_data));
                break;
        case DEVLINK_PARAM_TYPE_BOOL:
-               value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
-                              true : false;
+               if (param_data && nla_len(param_data))
+                       return -EINVAL;
+               value->vbool = nla_get_flag(param_data);
                break;
        }
        return 0;
@@ -5951,6 +5958,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
        [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+       [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
index 0642f91c4038d5b97b3820c3a16669583177fc0b..b4c87fe31be2e860db70a9840ce88d8c82fe2efb 100644 (file)
@@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
        kfree(css_cls_state(css));
 }
 
+/*
+ * To avoid stalling socket creation for tasks with a large number of threads
+ * and open sockets, release file_lock every 1000 iterated descriptors.
+ * New sockets will already have been created with the new classid.
+ */
+
+struct update_classid_context {
+       u32 classid;
+       unsigned int batch;
+};
+
+#define UPDATE_CLASSID_BATCH 1000
+
 static int update_classid_sock(const void *v, struct file *file, unsigned n)
 {
        int err;
+       struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file, &err);
 
        if (sock) {
                spin_lock(&cgroup_sk_update_lock);
-               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
-                                       (unsigned long)v);
+               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
                spin_unlock(&cgroup_sk_update_lock);
        }
+       if (--ctx->batch == 0) {
+               ctx->batch = UPDATE_CLASSID_BATCH;
+               return n + 1;
+       }
        return 0;
 }
 
+static void update_classid_task(struct task_struct *p, u32 classid)
+{
+       struct update_classid_context ctx = {
+               .classid = classid,
+               .batch = UPDATE_CLASSID_BATCH
+       };
+       unsigned int fd = 0;
+
+       do {
+               task_lock(p);
+               fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+               task_unlock(p);
+               cond_resched();
+       } while (fd);
+}
+
 static void cgrp_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
        struct task_struct *p;
 
        cgroup_taskset_for_each(p, css, tset) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)css_cls_state(css)->classid);
-               task_unlock(p);
+               update_classid_task(p, css_cls_state(css)->classid);
        }
 }
 
@@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
 
        css_task_iter_start(css, 0, &it);
        while ((p = css_task_iter_next(&it))) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)cs->classid);
-               task_unlock(p);
+               update_classid_task(p, cs->classid);
                cond_resched();
        }
        css_task_iter_end(&it);
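
The cls_cgroup rework above bounds how long the descriptor table stays locked: the per-fd callback returns a non-zero resume point every 1000 descriptors, and the caller drops the lock, reschedules, and restarts iterate_fd() from that descriptor. A userspace sketch of the same batched-resume loop over a plain array; the names and the lock are stand-ins, since iterate_fd() itself is kernel-only:

/* Batched iteration with a resume cursor: process at most BATCH
 * entries per locked pass, then drop the lock and continue where
 * we left off.
 */
#include <pthread.h>
#include <stdio.h>

#define NENTRIES 2500
#define BATCH    1000

static int table[NENTRIES];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 when done, or the index to resume from on the next pass. */
static unsigned int process_batch(unsigned int start, int newval)
{
	unsigned int i, done = 0;

	for (i = start; i < NENTRIES; i++) {
		table[i] = newval;	/* the per-entry update */
		if (++done == BATCH)
			return i + 1;	/* resume point, like returning n + 1 */
	}
	return 0;
}

int main(void)
{
	unsigned int cursor = 0;
	int passes = 0;

	do {
		pthread_mutex_lock(&table_lock);
		cursor = process_batch(cursor, 42);
		pthread_mutex_unlock(&table_lock);
		passes++;		/* a real caller would cond_resched() here */
	} while (cursor);

	printf("updated %d entries in %d locked passes\n", NENTRIES, passes);
	return 0;
}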
index a4c8fac781ff3ceba7fb6c85f636e18a0de66891..8f71684305c398195a1c18e2c12d0f26b440adbb 100644 (file)
@@ -1830,7 +1830,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                atomic_set(&newsk->sk_zckey, 0);
 
                sock_reset_flag(newsk, SOCK_DONE);
-               mem_cgroup_sk_alloc(newsk);
+
+               /* sk->sk_memcg will be populated at accept() time */
+               newsk->sk_memcg = NULL;
+
                cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
index a7662e7a691d385d3f493d9e7814560e51a2b76a..760e6ea3178a7e34b787c33660aa66aa8c0cb384 100644 (file)
@@ -117,7 +117,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 /* port.c */
 int dsa_port_set_state(struct dsa_port *dp, u8 state,
                       struct switchdev_trans *trans);
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable_rt(struct dsa_port *dp);
 void dsa_port_disable(struct dsa_port *dp);
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
index 774facb8d54754e13765611ebc0c421906de674d..ec13dc6667884f154c1fd76c823d11894860d11e 100644 (file)
@@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
                pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
 }
 
-int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
@@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 
+       if (dp->pl)
+               phylink_start(dp->pl);
+
        return 0;
 }
 
-void dsa_port_disable(struct dsa_port *dp)
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+{
+       int err;
+
+       rtnl_lock();
+       err = dsa_port_enable_rt(dp, phy);
+       rtnl_unlock();
+
+       return err;
+}
+
+void dsa_port_disable_rt(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
 
+       if (dp->pl)
+               phylink_stop(dp->pl);
+
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_DISABLED);
 
@@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
                ds->ops->port_disable(ds, port);
 }
 
+void dsa_port_disable(struct dsa_port *dp)
+{
+       rtnl_lock();
+       dsa_port_disable_rt(dp);
+       rtnl_unlock();
+}
+
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
 {
        struct dsa_notifier_bridge_info info = {
@@ -614,10 +638,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
                goto err_phy_connect;
        }
 
-       rtnl_lock();
-       phylink_start(dp->pl);
-       rtnl_unlock();
-
        return 0;
 
 err_phy_connect:
@@ -628,9 +648,14 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
 int dsa_port_link_register_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
+       struct device_node *phy_np;
 
-       if (!ds->ops->adjust_link)
-               return dsa_port_phylink_register(dp);
+       if (!ds->ops->adjust_link) {
+               phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
+               if (of_phy_is_fixed_link(dp->dn) || phy_np)
+                       return dsa_port_phylink_register(dp);
+               return 0;
+       }
 
        dev_warn(ds->dev,
                 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
@@ -645,11 +670,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
 
-       if (!ds->ops->adjust_link) {
+       if (!ds->ops->adjust_link && dp->pl) {
                rtnl_lock();
                phylink_disconnect_phy(dp->pl);
                rtnl_unlock();
                phylink_destroy(dp->pl);
+               dp->pl = NULL;
                return;
        }
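
The port.c changes above use a common locking-wrapper split: the core logic moves into *_rt() helpers that expect the caller to already hold rtnl_lock(), while thin dsa_port_enable()/dsa_port_disable() wrappers take and release the lock for callers that do not. A minimal userspace sketch of that split with a pthread mutex standing in for RTNL; all names are hypothetical:

/* Locked-context vs. wrapper split: port_enable_locked() assumes the
 * lock is held; port_enable() is for callers outside the locked path.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int port_enabled;

/* Caller must hold cfg_lock (the dsa_port_enable_rt() analogue). */
static int port_enable_locked(void)
{
	port_enabled = 1;
	puts("port enabled (lock already held)");
	return 0;
}

/* Convenience wrapper for unlocked callers (the dsa_port_enable() analogue). */
static int port_enable(void)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = port_enable_locked();
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

static void already_locked_path(void)
{
	/* Paths that already run under the lock call the locked variant
	 * directly, avoiding a deadlock on re-acquisition.
	 */
	pthread_mutex_lock(&cfg_lock);
	port_enable_locked();
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	port_enable();		/* standalone caller */
	already_locked_path();	/* caller that holds the lock */
	return port_enabled ? 0 : 1;
}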
 
index 088c886e609ee78d15aa139aad469471b833e057..ddc0f9236928891ffbb2f417df66ef4daf4950d6 100644 (file)
@@ -88,12 +88,10 @@ static int dsa_slave_open(struct net_device *dev)
                        goto clear_allmulti;
        }
 
-       err = dsa_port_enable(dp, dev->phydev);
+       err = dsa_port_enable_rt(dp, dev->phydev);
        if (err)
                goto clear_promisc;
 
-       phylink_start(dp->pl);
-
        return 0;
 
 clear_promisc:
@@ -114,9 +112,7 @@ static int dsa_slave_close(struct net_device *dev)
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
 
-       phylink_stop(dp->pl);
-
-       dsa_port_disable(dp);
+       dsa_port_disable_rt(dp);
 
        dev_mc_unsync(master, dev);
        dev_uc_unsync(master, dev);
index 2c7a38d76a3a6dbfcdb2f827e7fb0b8654850d96..0672b2f01586f195e1439610b3154b15282cdd2b 100644 (file)
@@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
        [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
        [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
        [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
        [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
index 5fd6e8ed02b5d5c201ff8dc8cf31db3994fcdcf7..66fdbfe5447cdb93e06fe85d94646a6806401e98 100644 (file)
@@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-/* Fills in tpi and returns header length to be pulled. */
+/* Fills in tpi and returns header length to be pulled.
+ * Note that the caller must use pskb_may_pull() before pulling the GRE header.
+ */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                     bool *csum_err, __be16 proto, int nhs)
 {
@@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
         * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
         */
        if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               u8 _val, *val;
+
+               val = skb_header_pointer(skb, nhs + hdr_len,
+                                        sizeof(_val), &_val);
+               if (!val)
+                       return -EINVAL;
                tpi->proto = proto;
-               if ((*(u8 *)options & 0xF0) != 0x40)
+               if ((*val & 0xF0) != 0x40)
                        hdr_len += 4;
        }
        tpi->hdr_len = hdr_len;
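
The WCCP fix above replaces a raw dereference of the options pointer with skb_header_pointer(), which fails cleanly when the requested bytes are not present and may copy them into a caller-supplied buffer when the data is not linear. A userspace sketch of that bounds-checked read over a plain buffer; header_pointer() here is a hypothetical stand-in, not the skb API:

/* Bounds-checked header access: return a pointer to `len` bytes at
 * `offset`, or NULL if the packet is too short. The real helper may
 * also copy from non-linear data into `buf`; we always copy here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pkt_len || offset + len < offset)
		return NULL;		/* truncated packet (or overflow) */
	memcpy(buf, pkt + offset, len);	/* emulate the copy-out path */
	return buf;
}

int main(void)
{
	uint8_t pkt[8] = { 0x00, 0x00, 0x88, 0x0b, 0x40, 0x00, 0x00, 0x00 };
	uint8_t _val;
	const uint8_t *val;

	/* Peek one byte just past a 4-byte base header, as the fix does. */
	val = header_pointer(pkt, sizeof(pkt), 4, sizeof(_val), &_val);
	if (!val)
		return 1;		/* would be -EINVAL in the kernel */

	printf("first options byte: %#x -> %s\n", *val,
	       (*val & 0xF0) != 0x40 ? "skip 4 extra bytes" : "no extra bytes");
	return 0;
}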
index a4db79b1b64301c4d7d172f7c2673a30d4307196..d545fb99a8a1c84153c4a42226d15754c1f52ca0 100644 (file)
@@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
+
 out:
        release_sock(sk);
+       if (newsk && mem_cgroup_sockets_enabled) {
+               int amt;
+
+               /* atomically get the memory usage, set and charge the
+                * newsk->sk_memcg.
+                */
+               lock_sock(newsk);
+
+               /* The socket has not been accepted yet, no need to look at
+                * newsk->sk_wmem_queued.
+                */
+               amt = sk_mem_pages(newsk->sk_forward_alloc +
+                                  atomic_read(&newsk->sk_rmem_alloc));
+               mem_cgroup_sk_alloc(newsk);
+               if (newsk->sk_memcg && amt)
+                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+
+               release_sock(newsk);
+       }
        if (req)
                reqsk_put(req);
        return newsk;
index f11e997e517b6652479acd892ffea6cdeb940e33..8c8377568a787c80540aac7e9ed6f122068010ca 100644 (file)
@@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
                aux = handler->idiag_get_aux_size(sk, net_admin);
 
        return    nla_total_size(sizeof(struct tcp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
                + nla_total_size(TCP_CA_NAME_MAX)
                + nla_total_size(sizeof(struct tcpvegas_info))
@@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
        if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
                goto errout;
 
+       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+           ext & (1 << (INET_DIAG_TCLASS - 1))) {
+               u32 classid = 0;
+
+#ifdef CONFIG_SOCK_CGROUP_DATA
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
+#endif
+               /* Fallback to socket priority if class id isn't set.
+                * Classful qdiscs use it as direct reference to class.
+                * For cgroup2 classid is always zero.
+                */
+               if (!classid)
+                       classid = sk->sk_priority;
+
+               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
+                       goto errout;
+       }
+
        r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
@@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                        goto errout;
        }
 
-       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
-           ext & (1 << (INET_DIAG_TCLASS - 1))) {
-               u32 classid = 0;
-
-#ifdef CONFIG_SOCK_CGROUP_DATA
-               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
-#endif
-               /* Fallback to socket priority if class id isn't set.
-                * Classful qdiscs use it as direct reference to class.
-                * For cgroup2 classid is always zero.
-                */
-               if (!classid)
-                       classid = sk->sk_priority;
-
-               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
-                       goto errout;
-       }
-
 out:
        nlmsg_end(skb, nlh);
        return 0;
index e35736b993003253614de79a6c83e4fd1a0a0a73..a93e7d1e125154e7afa0a7f162429a233de64a95 100644 (file)
@@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
        if (IS_ERR(sk))
                return PTR_ERR(sk);
 
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep) {
                sock_put(sk);
index 910555a4d9fe2dcc1465421afac904d20147dd06..dccd2286bc2849458417956f7340ab5e06105e49 100644 (file)
@@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out;
 
        err = -ENOMEM;
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep)
                goto out;
index cb493e15959c4d1bb68cf30f4099a8daa785bb84..46d614b611db260d6566c7d6b4d45111a6a027ec 100644 (file)
@@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
 }
 
 static void
-cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
+cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
+                    bool del_rt, bool del_peer)
 {
        struct fib6_info *f6i;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (f6i) {
                if (del_rt)
@@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        if (action != CLEANUP_PREFIX_RT_NOP) {
                cleanup_prefix_route(ifp, expires,
-                       action == CLEANUP_PREFIX_RT_DEL);
+                       action == CLEANUP_PREFIX_RT_DEL, false);
        }
 
        /* clean up prefsrc entries */
@@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_NONE) &&
            (dev->type != ARPHRD_RAWIP)) {
                /* Alas, we support only Ethernet autoconfiguration. */
+               idev = __in6_dev_get(dev);
+               if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
+                   dev->flags & IFF_MULTICAST)
+                       ipv6_mc_up(idev);
                return;
        }
 
@@ -4586,12 +4592,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int modify_prefix_route(struct inet6_ifaddr *ifp,
-                              unsigned long expires, u32 flags)
+                              unsigned long expires, u32 flags,
+                              bool modify_peer)
 {
        struct fib6_info *f6i;
        u32 prio;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (!f6i)
                return -ENOENT;
@@ -4602,7 +4610,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                ip6_del_rt(dev_net(ifp->idev->dev), f6i);
 
                /* add new one */
-               addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
+               addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                     ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
        } else {
@@ -4624,6 +4633,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        unsigned long timeout;
        bool was_managetempaddr;
        bool had_prefixroute;
+       bool new_peer = false;
 
        ASSERT_RTNL();
 
@@ -4655,6 +4665,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                cfg->preferred_lft = timeout;
        }
 
+       if (cfg->peer_pfx &&
+           memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
+               if (!ipv6_addr_any(&ifp->peer_addr))
+                       cleanup_prefix_route(ifp, expires, true, true);
+               new_peer = true;
+       }
+
        spin_lock_bh(&ifp->lock);
        was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
        had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
@@ -4670,6 +4687,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
                ifp->rt_priority = cfg->rt_priority;
 
+       if (new_peer)
+               ifp->peer_addr = *cfg->peer_pfx;
+
        spin_unlock_bh(&ifp->lock);
        if (!(ifp->flags&IFA_F_TENTATIVE))
                ipv6_ifa_notify(0, ifp);
@@ -4678,7 +4698,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                int rc = -ENOENT;
 
                if (had_prefixroute)
-                       rc = modify_prefix_route(ifp, expires, flags);
+                       rc = modify_prefix_route(ifp, expires, flags, false);
 
                /* prefix route could have been deleted; if so restore it */
                if (rc == -ENOENT) {
@@ -4686,6 +4706,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                                              ifp->rt_priority, ifp->idev->dev,
                                              expires, flags, GFP_KERNEL);
                }
+
+               if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
+                       rc = modify_prefix_route(ifp, expires, flags, true);
+
+               if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
+                       addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             expires, flags, GFP_KERNEL);
+               }
        } else if (had_prefixroute) {
                enum cleanup_prefix_rt_t action;
                unsigned long rt_expires;
@@ -4696,7 +4725,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
 
                if (action != CLEANUP_PREFIX_RT_NOP) {
                        cleanup_prefix_route(ifp, rt_expires,
-                               action == CLEANUP_PREFIX_RT_DEL);
+                               action == CLEANUP_PREFIX_RT_DEL, false);
                }
        }
 
@@ -5983,9 +6012,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
-                       addrconf_prefix_route(&ifp->peer_addr, 128, 0,
-                                             ifp->idev->dev, 0, 0,
-                                             GFP_ATOMIC);
+                       addrconf_prefix_route(&ifp->peer_addr, 128,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             0, 0, GFP_ATOMIC);
                break;
        case RTM_DELADDR:
                if (ifp->idev->cnf.forwarding)
index ab7f124ff5d7e8a69e84fae0c72c581a716266f5..8c52efe299cced6c79575033814c62d82d1f7295 100644 (file)
@@ -268,7 +268,7 @@ static int seg6_do_srh(struct sk_buff *skb)
                skb_mac_header_rebuild(skb);
                skb_push(skb, skb->mac_len);
 
-               err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE);
+               err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
                if (err)
                        return err;
 
index 7cbc19731997969bf7eec4ea5e1e8dd20d9e3da1..8165802d8e05dea47225264f20f995b5962565ae 100644 (file)
@@ -282,7 +282,7 @@ static int input_action_end_dx2(struct sk_buff *skb,
        struct net_device *odev;
        struct ethhdr *eth;
 
-       if (!decap_and_validate(skb, NEXTHDR_NONE))
+       if (!decap_and_validate(skb, IPPROTO_ETHERNET))
                goto drop;
 
        if (!pskb_may_pull(skb, ETH_HLEN))
index d699833703819af130c54803528aa7808e4f2c54..38a0383dfbcfaee991f9bbc05cc5d5dfabd76a90 100644 (file)
@@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (!(mpath->flags & MESH_PATH_RESOLVING))
+       if (!(mpath->flags & MESH_PATH_RESOLVING) &&
+           mesh_path_sel_is_hwmp(sdata))
                mesh_queue_preq(mpath, PREQ_Q_F_START);
 
        if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
index 45acd877bef345259ca2a84c58cd0d375f88386f..fd2c3150e59193e189a5e85f901d21c2bfca0115 100644 (file)
@@ -334,6 +334,8 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
        struct mptcp_sock *msk;
        unsigned int ack_size;
        bool ret = false;
+       bool can_ack;
+       u64 ack_seq;
        u8 tcp_fin;
 
        if (skb) {
@@ -360,9 +362,22 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                ret = true;
        }
 
+       /* the passive socket's msk will set 'can_ack' after accept(), even
+        * if the first subflow may already have the remote key handy
+        */
+       can_ack = true;
        opts->ext_copy.use_ack = 0;
        msk = mptcp_sk(subflow->conn);
-       if (!msk || !READ_ONCE(msk->can_ack)) {
+       if (likely(msk && READ_ONCE(msk->can_ack))) {
+               ack_seq = msk->ack_seq;
+       } else if (subflow->can_ack) {
+               mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+               ack_seq++;
+       } else {
+               can_ack = false;
+       }
+
+       if (unlikely(!can_ack)) {
                *size = ALIGN(dss_size, 4);
                return ret;
        }
@@ -375,7 +390,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 
        dss_size += ack_size;
 
-       opts->ext_copy.data_ack = msk->ack_seq;
+       opts->ext_copy.data_ack = ack_seq;
        opts->ext_copy.ack64 = 1;
        opts->ext_copy.use_ack = 1;
 
index 410809c669e119d9f8e84c54e9a3cc782e7b7196..4912069627b651fcad837f40eb208553bf55cdc5 100644 (file)
@@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
 
index b0930d4aba228bdf63eeb15ac521bfad482b51ea..b9cbe1e2453e82b5040b2309c9a67dda49da64d8 100644 (file)
@@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(snet->stats, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
 
index d1318bdf49ca97bcdc0b4dd9ff209fdb6cb183f2..38c680f28f157f1346e6047c661d93170de0f8cf 100644 (file)
@@ -1405,6 +1405,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                                              lockdep_commit_lock_is_held(net));
                if (nft_dump_stats(skb, stats))
                        goto nla_put_failure;
+
+               if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
+                   nla_put_be32(skb, NFTA_CHAIN_FLAGS,
+                                htonl(NFT_CHAIN_HW_OFFLOAD)))
+                       goto nla_put_failure;
        }
 
        if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@@ -6300,8 +6305,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
                goto err4;
 
        err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
-       if (err < 0)
+       if (err < 0) {
+               list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+                       list_del_rcu(&hook->list);
+                       kfree_rcu(hook, rcu);
+               }
                goto err4;
+       }
 
        err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
        if (err < 0)
@@ -7378,13 +7388,8 @@ static void nf_tables_module_autoload(struct net *net)
        list_splice_init(&net->nft.module_list, &module_list);
        mutex_unlock(&net->nft.commit_mutex);
        list_for_each_entry_safe(req, next, &module_list, list) {
-               if (req->done) {
-                       list_del(&req->list);
-                       kfree(req);
-               } else {
-                       request_module("%s", req->module);
-                       req->done = true;
-               }
+               request_module("%s", req->module);
+               req->done = true;
        }
        mutex_lock(&net->nft.commit_mutex);
        list_splice(&module_list, &net->nft.module_list);
@@ -8167,6 +8172,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
+       WARN_ON_ONCE(!list_empty(&net->nft.module_list));
 }
 
 static struct pernet_operations nf_tables_net_ops = {
index de3a9596b7f1bca045b8683bc48f66230fb65e75..a5f294aa8e4cf9c3ef361d775de6f8707d2f1143 100644 (file)
@@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
        [NFCTH_NAME] = { .type = NLA_NUL_STRING,
                         .len = NF_CT_HELPER_NAME_LEN-1 },
        [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+       [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+       [NFCTH_STATUS] = { .type = NLA_U32, },
 };
 
 static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
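Note on the entries added above: an nla_policy table needs a row for every attribute the parser may receive, describing its expected type and size, otherwise strict validation has nothing to check the payload against. A rough userspace analog of such a table-driven check follows; the attribute names, ids and sizes are invented for illustration and netlink framing is deliberately ignored.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented attribute ids and a policy table mapping each id to the exact
 * payload size it must carry; ids without an entry are treated as unknown. */
enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_QUEUE_NUM, DEMO_ATTR_STATUS, __DEMO_ATTR_MAX };

static const size_t demo_policy[__DEMO_ATTR_MAX] = {
	[DEMO_ATTR_QUEUE_NUM] = sizeof(uint32_t),
	[DEMO_ATTR_STATUS]    = sizeof(uint32_t),
};

/* Reject attributes the table does not describe, and fixed-width
 * attributes whose payload is not exactly the declared size. */
static int demo_validate(unsigned int type, size_t payload_len)
{
	if (type >= __DEMO_ATTR_MAX || demo_policy[type] == 0)
		return -1;
	return payload_len == demo_policy[type] ? 0 : -1;
}

int main(void)
{
	printf("STATUS, 4-byte payload:  %s\n", demo_validate(DEMO_ATTR_STATUS, 4) ? "reject" : "accept");
	printf("STATUS, 64-byte payload: %s\n", demo_validate(DEMO_ATTR_STATUS, 64) ? "reject" : "accept");
	return 0;
}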
index ff9ac8ae0031f037efe5ce1061ceabf79fde925a..eac4a901233f24d8fab7392efa36204d1ffab909 100644 (file)
@@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
        .name           = "nat",
        .type           = NFT_CHAIN_T_NAT,
        .family         = NFPROTO_INET,
+       .owner          = THIS_MODULE,
        .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
                          (1 << NF_INET_LOCAL_IN) |
                          (1 << NF_INET_LOCAL_OUT) |
index 1993af3a2979527362bfa419a73c65f71a789363..a7de3a58f553d57885876ba730b5ab2877422101 100644 (file)
@@ -129,6 +129,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
 };
 
 static int nft_payload_init(const struct nft_ctx *ctx,
index 4c3f2e24c7cba9e19f096d0c429904949c6f93ae..764e88682a81f25c4fb840a700780f65d23aaebb 100644 (file)
@@ -339,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
        [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_TOS]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_TTL]   = { .type = NLA_U8, },
+       [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+       [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_OPTS]  = { .type = NLA_NESTED, },
 };
 
index e27c6c5ba9df880e4604a032130b057bc8c023ab..cd2b034eef59aa96fe5c4e218e4897bac8f9aae6 100644 (file)
@@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
        struct nf_mttg_trav *trav = seq->private;
 
+       if (ppos != NULL)
+               ++(*ppos);
+
        switch (trav->class) {
        case MTTG_TRAV_INIT:
                trav->class = MTTG_TRAV_NFP_UNSPEC;
@@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        default:
                return NULL;
        }
-
-       if (ppos != NULL)
-               ++*ppos;
        return trav;
 }
 
index 0a9708004e205cf52f391f1b1729b06504eba207..225a7ab6d79a9480b70ded6dc4a774e0c7b9a409 100644 (file)
@@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        const struct recent_entry *e = v;
        const struct list_head *head = e->list.next;
 
+       (*pos)++;
        while (head == &t->iphash[st->bucket]) {
                if (++st->bucket >= ip_list_hash_size)
                        return NULL;
                head = t->iphash[st->bucket].next;
        }
-       (*pos)++;
        return list_entry(head, struct recent_entry, list);
 }
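The iterator hunks above (ct_cpu_seq_next, synproxy_cpu_seq_next, xt_mttg_seq_next and recent_seq_next) all enforce the same seq_file rule: the ->next() callback must advance *pos unconditionally, even on the path that ends the walk, or a read that stops at a buffer boundary and restarts at the stale position replays the last record. A minimal userspace analog of that contract, with names invented for illustration rather than taken from the kernel:

#include <stdio.h>

static const char *demo_records[] = { "alpha", "beta", "gamma" };
#define DEMO_NRECS (sizeof(demo_records) / sizeof(demo_records[0]))

/* Analog of a seq_file ->next(): bump *pos first, on every call, so a
 * walk that is stopped and restarted at this position does not hand out
 * the same record twice; only then decide whether anything is left. */
static const char *demo_seq_next(unsigned long *pos)
{
	unsigned long cur = (*pos)++;

	return cur < DEMO_NRECS ? demo_records[cur] : NULL;
}

int main(void)
{
	unsigned long pos = 0;
	const char *rec;

	while ((rec = demo_seq_next(&pos)))
		printf("%s\n", rec);
	return 0;
}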
 
index edf3e285e242877d78b044bac89b4a41804b56cb..5313f1cec17063fb048ad80fdf0a567c4dc7357f 100644 (file)
@@ -2434,7 +2434,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                                                               in_skb->len))
                                WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
                                                    (u8 *)extack->bad_attr -
-                                                   in_skb->data));
+                                                   (u8 *)nlh));
                } else {
                        if (extack->cookie_len)
                                WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
index 6f1b096e601c7e4c4744afc84c8dc7788935c8fa..43811b5219b5b6276663f7d527a24cf964c1665f 100644 (file)
@@ -181,13 +181,20 @@ void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
 void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                          struct sk_buff *skb)
 {
-       u8 gate = hdev->pipes[pipe].gate;
        u8 status = NFC_HCI_ANY_OK;
        struct hci_create_pipe_resp *create_info;
        struct hci_delete_pipe_noti *delete_info;
        struct hci_all_pipe_cleared_noti *cleared_info;
+       u8 gate;
 
-       pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+       pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               status = NFC_HCI_ANY_E_NOK;
+               goto exit;
+       }
+
+       gate = hdev->pipes[pipe].gate;
 
        switch (cmd) {
        case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
@@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                            struct sk_buff *skb)
 {
        int r = 0;
-       u8 gate = hdev->pipes[pipe].gate;
+       u8 gate;
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+               goto exit;
+       }
 
+       gate = hdev->pipes[pipe].gate;
        if (gate == NFC_HCI_INVALID_GATE) {
                pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
                goto exit;
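Both HCI hunks above add the same guard: the pipe number comes from the remote side, so it has to be checked against the bound of the pipes[] table before being used as an index. The idea, stripped down to plain C with an illustrative table size and names that are not from the NFC code:

#include <stdio.h>

#define DEMO_MAX_PIPES 128

static unsigned char demo_pipe_gate[DEMO_MAX_PIPES];

/* Return the gate for a pipe id received from an untrusted peer, or -1
 * if the id would index past the end of the table. */
static int demo_gate_for_pipe(unsigned int pipe)
{
	if (pipe >= DEMO_MAX_PIPES)
		return -1;
	return demo_pipe_gate[pipe];
}

int main(void)
{
	printf("pipe 3   -> %d\n", demo_gate_for_pipe(3));
	printf("pipe 500 -> %d\n", demo_gate_for_pipe(500));
	return 0;
}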
index eee0dddb7749e0c9dec121ea29a047226b9e4473..e894254c17d430f7055a7a333e454af6d942a913 100644 (file)
@@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
                                .len = NFC_DEVICE_NAME_MAXSIZE },
        [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+       [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
@@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
        [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
                                     .len = NFC_FIRMWARE_NAME_MAXSIZE },
+       [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+       [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+       [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
        [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 
 };
index c047afd121160f4fd66d4fcd135eb15dd2dbc6e7..07a7dd185995454b86f441cca819772cce9bcef0 100644 (file)
@@ -645,6 +645,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
+       [OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 30c6879d67748eb9c3275fb755aabe99e6073e4a..e5b0986215d25ee7d646c4de027bab4ecf897fcc 100644 (file)
@@ -2274,6 +2274,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
                goto drop_n_account;
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
@@ -2286,12 +2293,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        status |= TP_STATUS_LOSING;
        }
 
-       if (do_vnet &&
-           virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                   sizeof(struct virtio_net_hdr),
-                                   vio_le(), true, 0))
-               goto drop_n_account;
-
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
index a5a295477eccd52952e26e2ce121315341dddd0f..371ad84def3b6f1b5f0d0704b67723d0a8af22c6 100644 (file)
@@ -744,6 +744,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
+       [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
 };
index 660fc45ee40fc0036996f701d8c1c3184f713640..b1eb12d33b9a6ca9d608d03684d884adc257f0ec 100644 (file)
@@ -564,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);
 
-               if (!(gate_mask & BIT(tc)))
+               if (!(gate_mask & BIT(tc))) {
+                       skb = NULL;
                        continue;
+               }
 
                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
@@ -575,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   ktime_after(guard, entry->close_time))
+                   ktime_after(guard, entry->close_time)) {
+                       skb = NULL;
                        continue;
+               }
 
                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   atomic_sub_return(len, &entry->budget) < 0)
+                   atomic_sub_return(len, &entry->budget) < 0) {
+                       skb = NULL;
                        continue;
+               }
 
                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
@@ -768,6 +774,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
+       [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
index 8a15146faaebdcb869233a08318e4fb5a1e1129b..1069d7af367290a42df7a52b2b0a3638c5b55f21 100644 (file)
@@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
                addrcnt++;
 
        return    nla_total_size(sizeof(struct sctp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
                + nla_total_size(addrlen * asoc->peer.transport_count)
                + nla_total_size(addrlen * addrcnt)
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + 64;
 }
 
index d6ba186f67e2aa16d8f3822bea0950f0cb6612cc..05b825b3cfa4861cd0ab24b4ac9ad4476a75aabf 100644 (file)
@@ -582,6 +582,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
        smc_smcr_terminate_all(smcibdev);
        smc_ib_cleanup_per_ibdev(smcibdev);
        ib_unregister_event_handler(&smcibdev->event_handler);
+       cancel_work_sync(&smcibdev->port_event_work);
        kfree(smcibdev);
 }
 
index 7c35094c20b8b7af139a05220745af9d0fe61e04..bb9862410e689e992ccd22bb6b598ff2bdba17b2 100644 (file)
@@ -116,6 +116,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_PRIO]            = { .type = NLA_U32 },
        [TIPC_NLA_PROP_TOL]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 },
+       [TIPC_NLA_PROP_MTU]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST]       = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
 };
index 5b19e9fac4aac68e9ce222610fa8a75f9b1a9c29..ec5d67794aab67a2f7c56a880c7fd8510860dada 100644 (file)
@@ -470,6 +470,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_PLINK_STATE] =
                NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+       [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
+       [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
        [NL80211_ATTR_MESH_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
@@ -531,6 +533,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MDID] = { .type = NLA_U16 },
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
+       [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
        [NL80211_ATTR_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
@@ -561,6 +565,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
        [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+       [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
        [NL80211_ATTR_MAC_MASK] = {
                .type = NLA_EXACT_LEN_WARN,
                .len = ETH_ALEN
index ce3c5945a1c48ebb793b83080fc5225706148d66..637189ec1ab992e5a5313a8d71c8c499ffb27411 100644 (file)
@@ -1,18 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/errno.h"
+#include "../../../arch/x86/include/uapi/asm/errno.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/errno.h"
+#include "../../../arch/powerpc/include/uapi/asm/errno.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/errno.h"
+#include "../../../arch/sparc/include/uapi/asm/errno.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/errno.h"
+#include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/errno.h"
+#include "../../../arch/mips/include/uapi/asm/errno.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/errno.h"
+#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
-#include "../../arch/xtensa/include/uapi/asm/errno.h"
+#include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
index 8d6821d9c3f6cea8b26c8f5019d5d4f3793ef9b9..27653be244473b08701a917dad2ff72c871e1081 100644 (file)
 #include <linux/zalloc.h>
 #include <time.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/session.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/session.h"
 #include <internal/lib.h> // page_size
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/arm-spe.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/arm-spe.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index 2864e2e3776d5105d39f83f4de94d306df4a07ac..2833e101a7c6407263130e9948a06a2caa32bc4b 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index e9c436eeffc9d8f390eb564619283e49897c0adf..0a5242900248504530b760a60886d68cb1208df9 100644 (file)
@@ -4,8 +4,8 @@
 #include <regex.h>
 #include <linux/zalloc.h>
 
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
 
 #include <linux/kernel.h>
 
index 7abc9fd4cbec444032c5ddf3b1c6eae69b477ab6..3da506e13f49ddd002a0c6dc8dc4e6da9d61757a 100644 (file)
@@ -7,13 +7,13 @@
 #include <errno.h>
 #include <stdbool.h>
 
-#include "../../util/header.h"
-#include "../../util/debug.h"
-#include "../../util/pmu.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/evlist.h"
+#include "../../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/evlist.h"
 
 static
 struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
index ac45015cc6bae1c5c89dd6035a0c6aaec6bd4dd0..047dc00eafa6ec099fda74e794ebcd378525ef2b 100644 (file)
@@ -3,12 +3,12 @@
 #include <linux/string.h>
 #include <linux/zalloc.h>
 
-#include "../../util/event.h"
-#include "../../util/synthetic-events.h"
-#include "../../util/machine.h"
-#include "../../util/tool.h"
-#include "../../util/map.h"
-#include "../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
+#include "../../../util/machine.h"
+#include "../../../util/tool.h"
+#include "../../../util/map.h"
+#include "../../../util/debug.h"
 
 #if defined(__x86_64__)
 
index aa6deb463bf3c7407fd47cde1edcd933c68c42ff..578c8c568ffd6f1210a421052fd2ad5ba5242879 100644 (file)
@@ -7,8 +7,8 @@
 #include <string.h>
 #include <regex.h>
 
-#include "../../util/debug.h"
-#include "../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/header.h"
 
 static inline void
 cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
index 26cee10521794be3a91741860c1a93f178c07ab6..09f93800bffd0cbdc5bddc6406fb7eebca7acc3c 100644 (file)
 #include <linux/log2.h>
 #include <linux/zalloc.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/mmap.h"
-#include "../../util/session.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/tsc.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-bts.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/tsc.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-bts.h"
 #include <internal/lib.h> // page_size
 
 #define KiB(x) ((x) * 1024)
index 7eea4fd7ce58555256618410eccc462ee980dc93..1643aed8c4c8ee319206916ad9f1ebfc772a7784 100644 (file)
 #include <linux/zalloc.h>
 #include <cpuid.h>
 
-#include "../../util/session.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/evsel_config.h"
-#include "../../util/cpumap.h"
-#include "../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/mmap.h"
 #include <subcmd/parse-options.h>
-#include "../../util/parse-events.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/target.h"
-#include "../../util/tsc.h"
+#include "../../../util/parse-events.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/target.h"
+#include "../../../util/tsc.h"
 #include <internal/lib.h> // page_size
-#include "../../util/intel-pt.h"
+#include "../../../util/intel-pt.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index e17e080e76f49b1ed1bcf3415f473e2d1b21ebb1..31679c35d493e87d10af534fa1c5fc9732d8191d 100644 (file)
@@ -5,9 +5,9 @@
 #include <stdlib.h>
 
 #include <internal/lib.h> // page_size
-#include "../../util/machine.h"
-#include "../../util/map.h"
-#include "../../util/symbol.h"
+#include "../../../util/machine.h"
+#include "../../../util/map.h"
+#include "../../../util/symbol.h"
 #include <linux/ctype.h>
 
 #include <symbol/kallsyms.h>
index c218b83e063b52510697a41c047b328d1e17f36f..fca81b39b09f4f65a2f4d64d05264197e8893388 100644 (file)
@@ -5,10 +5,10 @@
 #include <linux/kernel.h>
 #include <linux/zalloc.h>
 
-#include "../../perf-sys.h"
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
-#include "../../util/event.h"
+#include "../../../perf-sys.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(AX, PERF_REG_X86_AX),
index e33ef5bc31c57f08aa40ac682b4ca8931aba7751..d48d608517fd273212b0c8c0da310aa8075085a4 100644 (file)
@@ -4,9 +4,9 @@
 #include <linux/stddef.h>
 #include <linux/perf_event.h>
 
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/pmu.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
index fddb3ced9db620f8700faa82494d80c5b2ee1c24..4aa6de1aa67dc6a7f095d135e95f8b406cc7f5c1 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+#include <sys/time.h>
+
+extern struct timeval bench__start, bench__end, bench__runtime;
+
 /*
  * The madvise transparent hugepage constants were added in glibc
  * 2.13. For compatibility with older versions of glibc, define these
index bb617e56884129ce83bf4f7db4f79fb0c7f1a95a..cadc18d42aa4af3eab7b190f9a95b8c761d6ddf4 100644 (file)
@@ -35,7 +35,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool done, __verbose, randomize;
 
 /*
@@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void nest_epollfd(void)
@@ -313,6 +312,7 @@ int bench_epoll_ctl(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -361,7 +361,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
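The memset(&act, 0, sizeof(act)) added above (and in the other bench files further down) matters because a stack-allocated struct sigaction starts out with indeterminate contents; fields such as sa_flags must not be left as garbage before sigaction() is called. A self-contained sketch of the safe installation sequence; the handler name is made up, and SA_SIGINFO is set here explicitly to match the three-argument handler form:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t done;

static void on_sigint(int sig, siginfo_t *info, void *uctx)
{
	(void)sig; (void)info; (void)uctx;
	done = 1;
}

int main(void)
{
	struct sigaction act;

	/* Zero the whole struct first: any field not set below (sa_flags in
	 * particular) would otherwise hold whatever was on the stack. */
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	act.sa_sigaction = on_sigint;
	if (sigaction(SIGINT, &act, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	puts("press Ctrl-C to stop");
	while (!done)
		pause();
	puts("done");
	return 0;
}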
 
index 7af694437f4ead2adce1cb9079291bd2d134dfcc..f938c585d51248ddfc3277383098c05f5a8df60f 100644 (file)
@@ -90,7 +90,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool wdone, done, __verbose, randomize, nonblocking;
 
 /*
@@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -287,7 +286,7 @@ static void print_summary(void)
 
        printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
@@ -427,6 +426,7 @@ int bench_epoll_wait(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -479,7 +479,7 @@ int bench_epoll_wait(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
 
@@ -519,7 +519,7 @@ int bench_epoll_wait(int argc, const char **argv)
                qsort(worker, nthreads, sizeof(struct worker), cmpworker);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
 
index 8ba0c3330a9a2af7a3483d2ad13e85d71350e046..65eebe06c04d406b2fa11c5c43d443675af12b88 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024;
 static bool fshared = false, done = false, silent = false;
 static int futex_flag = 0;
 
-struct timeval start, end, runtime;
+struct timeval bench__start, bench__end, bench__runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -114,7 +114,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 int bench_futex_hash(int argc, const char **argv)
@@ -137,6 +137,7 @@ int bench_futex_hash(int argc, const char **argv)
        if (!cpu)
                goto errmem;
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -161,7 +162,7 @@ int bench_futex_hash(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
        for (i = 0; i < nthreads; i++) {
                worker[i].tid = i;
                worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
@@ -204,7 +205,7 @@ int bench_futex_hash(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
                update_stats(&throughput_stats, t);
                if (!silent) {
                        if (nfutexes == 1)
index d0cae8125423f69a76f2435c6c8ee01e926f69ea..89fd8f325f384eeac2f5e71c246b1e9aff2f28eb 100644 (file)
@@ -37,7 +37,6 @@ static bool silent = false, multi = false;
 static bool done = false, fshared = false;
 static unsigned int nthreads = 0;
 static int futex_flag = 0;
-struct timeval start, end, runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -64,7 +63,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void *workerfn(void *arg)
@@ -161,6 +160,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -185,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        create_threads(worker, thread_attr, cpu);
        pthread_attr_destroy(&thread_attr);
@@ -211,7 +211,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
                if (!silent)
index a00a6891447ab3dcf3595f82b5526c6cea64ee7c..7a15c2e610228f081d8a3e5b8da2e5e283a8a427 100644 (file)
@@ -128,6 +128,7 @@ int bench_futex_requeue(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "cpu_map__new");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index a053cf2b703974353ce0456e5c1e2a3a5b454259..cd2b81a845acb0537f8841fb914b006c69c17772 100644 (file)
@@ -234,6 +234,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index df810096abfef9f9173818f8eaa052f3769d9a35..2dfcef3e371e4b15b9dead17bfa348b00e2a79d1 100644 (file)
@@ -43,7 +43,7 @@ static bool done = false, silent = false, fshared = false;
 static pthread_mutex_t thread_lock;
 static pthread_cond_t thread_parent, thread_worker;
 static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
 static int futex_flag = 0;
 
 static const struct option options[] = {
@@ -136,12 +136,13 @@ int bench_futex_wake(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
 
        if (!nthreads)
-               nthreads = ncpus;
+               nthreads = cpu->nr;
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index f8b6ae557d8bd7b7750a8fccf53ae338059ef91e..c03c36fde7e2f3a0d31146c948a38268560227f9 100644 (file)
@@ -1312,7 +1312,8 @@ static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s] %4ld",
                          start_line, end_line, block_he->diff.cycles);
        } else {
index f6dd1a63f159e970041d8614b0d0223cdf4a8048..d2539b793f9d4fb8ae7299f0183217529d5525d9 100644 (file)
@@ -684,7 +684,9 @@ static void *display_thread(void *arg)
        delay_msecs = top->delay_secs * MSEC_PER_SEC;
        set_term_quiet_input(&save);
        /* trash return*/
-       getc(stdin);
+       clearerr(stdin);
+       if (poll(&stdin_poll, 1, 0) > 0)
+               getc(stdin);
 
        while (!done) {
                perf_top__print_sym_table(top);
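The change above stops display_thread() from blocking in getc(stdin) when there is nothing to read: poll() with a zero timeout first reports whether input is pending. The same pattern in a standalone program; none of this is perf code, and the pollfd is declared locally here for the sketch:

#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	int c = -1;

	/* Only read if a read would not block. */
	clearerr(stdin);
	if (poll(&stdin_poll, 1, 0) > 0)
		c = getc(stdin);

	puts(c >= 0 ? "consumed one pending byte" : "no pending input");
	return 0;
}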
index 079c77b6a2fdfd236ca57fe424a6e04059291236..27b4da80f751177968848ff1656caaa5e91dfb1c 100644 (file)
@@ -1082,10 +1082,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
  */
 int main(int argc, char *argv[])
 {
-       int rc;
+       int rc, ret = 0;
        int maxfds;
        char ldirname[PATH_MAX];
-
        const char *arch;
        const char *output_file;
        const char *start_dirname;
@@ -1156,7 +1155,8 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
+               goto out_free_mapfile;
        } else if (rc) {
                goto empty_map;
        }
@@ -1174,14 +1174,17 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
        }
 
-       return 0;
+
+       goto out_free_mapfile;
 
 empty_map:
        fclose(eventsfp);
        create_empty_mapping(output_file);
        free_arch_std_events();
-       return 0;
+out_free_mapfile:
+       free(mapfile);
+       return ret;
 }
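The main() rework above funnels the error paths through a single exit label so mapfile is freed no matter how the function ends. The underlying idiom, reduced to a standalone sketch with hypothetical names and a placeholder path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every failure path jumps to one label that releases whatever has been
 * acquired so far, instead of returning early and leaking it. */
static int process(const char *path)
{
	char *mapfile = NULL;
	FILE *fp = NULL;
	int ret = 0;

	mapfile = strdup(path);
	if (!mapfile)
		return 1;

	fp = fopen(mapfile, "r");
	if (!fp) {
		ret = 1;
		goto out_free_mapfile;
	}

	/* ... real work would go here ... */

	fclose(fp);
out_free_mapfile:
	free(mapfile);
	return ret;
}

int main(void)
{
	return process("/etc/hostname");
}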
index d0b935356274b2297970f28ca319dd9205602284..489b50604cf274046b879c54eb0e2a9037236ff8 100644 (file)
@@ -19,7 +19,7 @@
 #include "../perf-sys.h"
 #include "cloexec.h"
 
-volatile long the_var;
+static volatile long the_var;
 
 static noinline int test_function(void)
 {
index c4b030bf6ec2d258da7fecac94d7154b859fb580..fbbb6d640dadcff16ad08083148fdfc9b4598e06 100644 (file)
@@ -295,7 +295,8 @@ static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s]",
                          start_line, end_line);
        } else {
index 6242a9215df7ee325265646dd9ce71acbc0c73b8..4154f944f474a4152d4469c652a040db4b05a78a 100644 (file)
@@ -343,11 +343,11 @@ static const char *normalize_arch(char *arch)
 
 const char *perf_env__arch(struct perf_env *env)
 {
-       struct utsname uts;
        char *arch_name;
 
        if (!env || !env->arch) { /* Assume local operation */
-               if (uname(&uts) < 0)
+               static struct utsname uts = { .machine[0] = '\0', };
+               if (uts.machine[0] == '\0' && uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
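The hunk above matters because arch_name can end up pointing into uts: with the old automatic variable that storage disappears when the function returns, while a static struct, filled lazily on the first call, keeps the string valid and avoids repeated uname() calls. A reduced sketch of the same shape, not the perf function itself:

#include <stdio.h>
#include <sys/utsname.h>

/* uname() fills a static struct exactly once, so the returned pointer
 * stays valid after the function returns; an automatic utsname would
 * leave callers holding a dangling pointer. */
static const char *machine_name(void)
{
	static struct utsname uts = { .machine[0] = '\0', };

	if (uts.machine[0] == '\0' && uname(&uts) < 0)
		return NULL;
	return uts.machine;
}

int main(void)
{
	const char *m = machine_name();

	printf("machine: %s\n", m ? m : "(unknown)");
	return 0;
}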
index a08ca276098ee1c8fa094307b92c51fa5699e003..95428511300d1e371c756846fbdfd3b415e94782 100644 (file)
@@ -431,7 +431,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
 
        if (map && map->dso) {
                char *srcline = map__srcline(map, addr, NULL);
-               if (srcline != SRCLINE_UNKNOWN)
+               if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
                        ret = fprintf(fp, "%s%s", prefix, srcline);
                free_srcline(srcline);
        }
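The three srcline hunks above (in the diff, block-range and map code) replace a pointer comparison against SRCLINE_UNKNOWN with a content comparison: once the lookup can hand back a freshly allocated copy of the "unknown" string, comparing addresses silently stops detecting it. A toy reproduction of the difference; the sentinel value below is illustrative, not perf's macro:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char demo_srcline_unknown[] = "??:0";   /* illustrative sentinel */

/* Stand-in for a srcline lookup that may return a heap-allocated copy of
 * the sentinel rather than the sentinel pointer itself. */
static char *demo_srcline(int found)
{
	return strdup(found ? "foo.c:42" : demo_srcline_unknown);
}

int main(void)
{
	char *line = demo_srcline(0);

	if (line != demo_srcline_unknown)
		puts("pointer compare: treats the line as known (misses the heap copy)");
	if (strncmp(line, demo_srcline_unknown, strlen(demo_srcline_unknown)) == 0)
		puts("content compare: correctly reports it as unknown");

	free(line);
	return 0;
}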
index c01ba6f8fdad3a3662662267d0941a7bd3b6db04..a14995835d85980f8ac86725ea9f197499d8b6d1 100644 (file)
@@ -257,21 +257,15 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                path = zalloc(sizeof(*path));
                                if (!path)
                                        return NULL;
-                               path->system = malloc(MAX_EVENT_LENGTH);
-                               if (!path->system) {
+                               if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
                                        free(path);
                                        return NULL;
                                }
-                               path->name = malloc(MAX_EVENT_LENGTH);
-                               if (!path->name) {
+                               if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
                                        zfree(&path->system);
                                        free(path);
                                        return NULL;
                                }
-                               strncpy(path->system, sys_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
-                               strncpy(path->name, evt_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
                                return path;
                        }
                }
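The tracepoint-path hunk above swaps a malloc() plus strncpy() pair for asprintf(): strncpy() does not NUL-terminate when the source reaches the length limit, and the fixed-size buffers were oversized, while asprintf() with a "%.*s" format allocates exactly what is needed and always terminates. A standalone illustration; the length cap here is just a stand-in for MAX_EVENT_LENGTH:

#define _GNU_SOURCE          /* for asprintf() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_MAX_EVENT_LENGTH 512   /* illustrative cap on the copied length */

int main(void)
{
	const char *name = "sched:sched_switch";
	char *copy;

	/* asprintf() sizes the buffer itself and always NUL-terminates,
	 * unlike strncpy() into a fixed buffer, which leaves the string
	 * unterminated if the source hits the length limit. */
	if (asprintf(&copy, "%.*s", DEMO_MAX_EVENT_LENGTH, name) < 0)
		return 1;

	printf("%s (%zu bytes)\n", copy, strlen(copy) + 1);
	free(copy);
	return 0;
}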
index 1077013d8ce2ed1a880ddd7f9f6a189311ce32d3..26bc6a0096ce568bd4e9e70fa910063f1633787b 100644 (file)
@@ -1622,7 +1622,12 @@ int dso__load(struct dso *dso, struct map *map)
                goto out;
        }
 
-       if (dso->kernel) {
+       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+
+       if (dso->kernel && !kmod) {
                if (dso->kernel == DSO_TYPE_KERNEL)
                        ret = dso__load_kernel_sym(dso, map);
                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
@@ -1650,12 +1655,6 @@ int dso__load(struct dso *dso, struct map *map)
        if (!name)
                goto out;
 
-       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
-
-
        /*
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
index 33dc34db4f3cc62c956676466fbab7eb78f0f140..20f46348271b1b9dcdadfff8918150a5944de4d8 100644 (file)
@@ -82,7 +82,7 @@ static struct pci_access *pci_acc;
 static struct pci_dev *amd_fam14h_pci_dev;
 static int nbp1_entered;
 
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 #ifdef DEBUG
index 3c4cee160b0e69a91f8bb8dbf5527ed32cd442fa..a65f7d011513a3f3d311790cf6b9c81aee99fb1d 100644 (file)
@@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
 
 static unsigned long long **previous_count;
 static unsigned long long **current_count;
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 static int cpuidle_get_count_percent(unsigned int id, double *percent,
index 6d44fec55ad5ac230874fd7bc6d86d84be8800c9..7c77045fef52f1d20c2e1c71602cbd7da8dab168 100644 (file)
@@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = {
 0
 };
 
+int cpu_count;
+
 static struct cpuidle_monitor *monitors[MONITORS_MAX];
 static unsigned int avail_monitors;
 
index 5b5eb1da0cce39257275cbe73d20581ae4e6926e..c559d3115330a41e4c31d22876175b4490c85413 100644 (file)
@@ -25,7 +25,7 @@
 #endif
 #define CSTATE_DESC_LEN 60
 
-int cpu_count;
+extern int cpu_count;
 
 /* Hard to define the right names ...: */
 enum power_range_e {
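The cpupower and perf tool changes above (static for file-local globals, and a single definition plus an extern declaration in the shared header for cpu_count and the bench__* timevals) are the usual cure for duplicate-definition link errors once common symbols are no longer merged by default, as with -fno-common in recent GCC. A compressed sketch of the pattern; the file names shown in comments are hypothetical, and the snippet also builds when pasted as one file:

/* counts.h (hypothetical): declaration only, no storage is allocated here,
 * so every .c file that includes it refers to the same symbol. */
extern int cpu_count;

/* counts.c (hypothetical): the single definition the linker keeps. */
int cpu_count;

/* main.c (hypothetical): any other file just uses the declared symbol. */
#include <stdio.h>

int main(void)
{
	cpu_count = 4;
	printf("%d CPUs\n", cpu_count);
	return 0;
}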
index 220d04f958a628948d62409afc2c3c2ce35d874b..7570e36d636d833670253881fc501444d2bfcba1 100755 (executable)
@@ -30,7 +30,7 @@ my %default = (
     "EMAIL_WHEN_STARTED"       => 0,
     "NUM_TESTS"                        => 1,
     "TEST_TYPE"                        => "build",
-    "BUILD_TYPE"               => "randconfig",
+    "BUILD_TYPE"               => "oldconfig",
     "MAKE_CMD"                 => "make",
     "CLOSE_CONSOLE_SIGNAL"     => "INT",
     "TIMEOUT"                  => 120,
@@ -1030,7 +1030,7 @@ sub __read_config {
            }
 
            if (!$skip && $rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after $type\n$_";
+               die "$name: $.: Garbage found after $type\n$_";
            }
 
            if ($skip && $type eq "TEST_START") {
@@ -1063,7 +1063,7 @@ sub __read_config {
            }
 
            if ($rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after DEFAULTS\n$_";
+               die "$name: $.: Garbage found after DEFAULTS\n$_";
            }
 
        } elsif (/^\s*INCLUDE\s+(\S+)/) {
@@ -1154,7 +1154,7 @@ sub __read_config {
            # on of these sections that have SKIP defined.
            # The save variable can be
            # defined multiple times and the new one simply overrides
-           # the prevous one.
+           # the previous one.
            set_variable($lvalue, $rvalue);
 
        } else {
@@ -1234,7 +1234,7 @@ sub read_config {
        foreach my $option (keys %not_used) {
            print "$option\n";
        }
-       print "Set IGRNORE_UNUSED = 1 to have ktest ignore unused variables\n";
+       print "Set IGNORE_UNUSED = 1 to have ktest ignore unused variables\n";
        if (!read_yn "Do you want to continue?") {
            exit -1;
        }
@@ -1345,7 +1345,7 @@ sub eval_option {
        # Check for recursive evaluations.
        # 100 deep should be more than enough.
        if ($r++ > 100) {
-           die "Over 100 evaluations accurred with $option\n" .
+           die "Over 100 evaluations occurred with $option\n" .
                "Check for recursive variables\n";
        }
        $prev = $option;
@@ -1383,7 +1383,7 @@ sub reboot {
 
     } else {
        # Make sure everything has been written to disk
-       run_ssh("sync");
+       run_ssh("sync", 10);
 
        if (defined($time)) {
            start_monitor;
@@ -1461,7 +1461,7 @@ sub get_test_name() {
 
 sub dodie {
 
-    # avoid recusion
+    # avoid recursion
     return if ($in_die);
     $in_die = 1;
 
index c3bc933d437b34d3f9e77029908517d6a53b0e32..27666b8007edbb596085ebfb49441be8ad160c2e 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 # Options set in the beginning of the file are considered to be
-# default options. These options can be overriden by test specific
+# default options. These options can be overridden by test specific
 # options, with the following exceptions:
 #
 #  LOG_FILE
 #
 # This config file can also contain "config variables".
 # These are assigned with ":=" instead of the ktest option
-# assigment "=".
+# assignment "=".
 #
 # The difference between ktest options and config variables
 # is that config variables can be used multiple times,
 #### Using options in other options ####
 #
 # Options that are defined in the config file may also be used
-# by other options. All options are evaulated at time of
+# by other options. All options are evaluated at time of
 # use (except that config variables are evaluated at config
 # processing time).
 #
 #TEST = ssh user@machine /root/run_test
 
 # The build type is any make config type or special command
-#  (default randconfig)
+#  (default oldconfig)
 #   nobuild - skip the clean and build step
 #   useconfig:/path/to/config - use the given config and run
 #              oldconfig on it.
 
 # Line to define a successful boot up in console output.
 # This is what the line contains, not the entire line. If you need
-# the entire line to match, then use regural expression syntax like:
+# the entire line to match, then use regular expression syntax like:
 #  (do not add any quotes around it)
 #
 #  SUCCESS_LINE = ^MyBox Login:$
 # (ignored if POWEROFF_ON_SUCCESS is set)
 #REBOOT_ON_SUCCESS = 1
 
-# In case there are isses with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
 # to always powercycle after this amount of time after calling
 # reboot.
 # Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
 # (default undefined)
 #POWERCYCLE_AFTER_REBOOT = 5
 
-# In case there's isses with halting, you can specify this
+# In case there's issues with halting, you can specify this
 # to always poweroff after this amount of time after calling
 # halt.
 # Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
 #
 #  PATCHCHECK_START is required and is the first patch to
 #   test (the SHA1 of the commit). You may also specify anything
-#   that git checkout allows (branch name, tage, HEAD~3).
+#   that git checkout allows (branch name, tag, HEAD~3).
 #
 #  PATCHCHECK_END is the last patch to check (default HEAD)
 #
 #     IGNORE_WARNINGS is set for the given commit's sha1
 #
 #   IGNORE_WARNINGS can be used to disable the failure of patchcheck
-#     on a particuler commit (SHA1). You can add more than one commit
+#     on a particular commit (SHA1). You can add more than one commit
 #     by adding a list of SHA1s that are space delimited.
 #
 #   If BUILD_NOCLEAN is set, then make mrproper will not be run on
 #   whatever reason. (Can't reboot, want to inspect each iteration)
 #   Doing a BISECT_MANUAL will have the test wait for you to
 #   tell it if the test passed or failed after each iteration.
-#   This is basicall the same as running git bisect yourself
+#   This is basically the same as running git bisect yourself
 #   but ktest will rebuild and install the kernel for you.
 #
 # BISECT_CHECK = 1 (optional, default 0)
 #
 # CONFIG_BISECT_EXEC (optional)
 #  The config bisect is a separate program that comes with ktest.pl.
-#  By befault, it will look for:
+#  By default, it will look for:
 #    `pwd`/config-bisect.pl # the location ktest.pl was executed from.
 #  If it does not find it there, it will look for:
 #    `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
index 60273f1bc7d9c0cfe8324d8bd4b256cc88ad2fff..b7616704b55e982f20d55c768def568f54e0efc1 100755 (executable)
@@ -1041,6 +1041,27 @@ ipv6_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # verify peer metric added correctly
+       set -e
+       run_cmd "$IP -6 addr flush dev dummy2"
+       run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on local side"
+       log_test $? 0 "User specified metric on local address"
+       check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on peer side"
+
+       set -e
+       run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on local side"
+       check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on peer side"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
@@ -1457,13 +1478,20 @@ ipv4_addr_metric_test()
 
        run_cmd "$IP addr flush dev dummy2"
        run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
-       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Set metric of address with peer route"
+
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
                rc=$?
        fi
-       log_test $rc 0 "Modify metric of address with peer route"
+       log_test $rc 0 "Modify metric and peer address for peer route"
 
        $IP li del dummy1
        $IP li del dummy2
index 477bc61b374a5e4e4cdfb31f1b750d512e7c84f0..c03af46002818234020d58a55bf9a7ac116acfd1 100644 (file)
@@ -57,3 +57,4 @@ CONFIG_NET_IFE_SKBMARK=m
 CONFIG_NET_IFE_SKBPRIO=m
 CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
+CONFIG_NET_SCH_ETS=m