Merge branch 'for-upstream/mali-dp' of git://linux-arm.org/linux-ld into drm-next
author Dave Airlie <airlied@redhat.com>
Thu, 4 Oct 2018 01:30:58 +0000 (11:30 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 4 Oct 2018 01:31:05 +0000 (11:31 +1000)
misc mali-dp updates.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Liviu Dudau <Liviu.Dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181003105009.GD1156@e110455-lin.cambridge.arm.com
286 files changed:
Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/media/uapi/dvb/video_function_calls.rst
MAINTAINERS
Makefile
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/tm.S
arch/powerpc/lib/checksum_64.S
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pkeys.c
arch/powerpc/platforms/powernv/pci-ioda-tce.c
arch/riscv/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/x86/boot/compressed/mem_encrypt.S
block/blk-mq-tag.c
block/blk-mq.c
block/elevator.c
drivers/block/xen-blkfront.c
drivers/clocksource/timer-atmel-pit.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-ti-32k.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/dax/device.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.h
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-mixer.h
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_dmm_priv.h
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/omapdrm/tcm-sita.h [deleted file]
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_drv.h
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/ttm_lock.c [moved from drivers/gpu/drm/ttm/ttm_lock.c with 94% similarity]
drivers/gpu/drm/vmwgfx/ttm_lock.h [moved from include/drm/ttm/ttm_lock.h with 100% similarity]
drivers/gpu/drm/vmwgfx/ttm_object.c [moved from drivers/gpu/drm/ttm/ttm_object.c with 90% similarity]
drivers/gpu/drm/vmwgfx/ttm_object.h [moved from include/drm/ttm/ttm_object.h with 94% similarity]
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h [new file with mode: 0644]
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/pci.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_uapi.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/pio.h
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/atakbd.c
drivers/input/misc/uinput.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/egalax_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.h
drivers/iommu/rockchip-iommu.c
drivers/md/bcache/bcache.h
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/media/i2c/mt9v111.c
drivers/media/platform/Kconfig
drivers/media/platform/qcom/camss/camss-csid.c
drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
drivers/media/platform/qcom/camss/camss-csiphy.c
drivers/media/platform/qcom/camss/camss-ispif.c
drivers/media/platform/qcom/camss/camss-vfe-4-1.c
drivers/media/platform/qcom/camss/camss-vfe-4-7.c
drivers/media/platform/qcom/camss/camss.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/apple/mace.c
drivers/net/ethernet/apple/macmace.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/cirrus/mac89x0.c
drivers/net/ethernet/i825xx/ether1.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sun3_82586.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/seeq/ether3.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sgi/ioc3-eth.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/phy/sfp-bus.c
drivers/net/tun.c
drivers/nvme/host/multipath.c
drivers/pci/controller/dwc/pcie-designware.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/pci-hyperv.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pinctrl/intel/pinctrl-cannonlake.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-amd.c
drivers/regulator/bd71837-regulator.c
drivers/regulator/core.c
drivers/regulator/of_regulator.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/soundwire/stream.c
drivers/spi/spi-gpio.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-tegra20-slink.c
drivers/staging/media/mt9t031/Kconfig
drivers/target/iscsi/iscsi_target_auth.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/tty_io.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-wdm.c
drivers/usb/common/roles.c
drivers/usb/core/devio.c
drivers/usb/core/driver.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/musb/musb_dsps.c
drivers/usb/typec/mux.c
fs/dax.c
fs/ext2/inode.c
include/drm/drm_panel.h
include/drm/ttm/ttm_bo_api.h
include/linux/mfd/rohm-bd718x7.h
include/linux/netpoll.h
include/linux/regulator/machine.h
include/linux/spi/spi-mem.h
include/linux/stmmac.h
include/linux/uio.h
include/net/nfc/hci.h
include/uapi/drm/drm_fourcc.h
include/uapi/linux/keyctl.h
kernel/bpf/sockmap.c
kernel/dma/Kconfig
kernel/events/core.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/gateway_client.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/tvlv.c
net/core/devlink.c
net/core/ethtool.c
net/core/netpoll.c
net/ipv4/ip_tunnel.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/route.c
net/mpls/af_mpls.c
net/netlabel/netlabel_unlabeled.c
net/nfc/hci/core.c
net/rds/ib.h
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_pnet.c
security/keys/dh.c
tools/include/tools/libc_compat.h
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/powerpc/alignment/Makefile
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/cache_shape/Makefile
tools/testing/selftests/powerpc/copyloops/Makefile
tools/testing/selftests/powerpc/dscr/Makefile
tools/testing/selftests/powerpc/math/Makefile
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/primitives/Makefile
tools/testing/selftests/powerpc/ptrace/Makefile
tools/testing/selftests/powerpc/signal/Makefile
tools/testing/selftests/powerpc/stringloops/Makefile
tools/testing/selftests/powerpc/switch_endian/Makefile
tools/testing/selftests/powerpc/syscalls/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/vphn/Makefile

diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index 2fff8b406f4cbe3ef8eec8e4bc72dd4d07d88c00..be377786e8cdda6f4dc6fd355e03c09084a6944f 100644
@@ -21,6 +21,9 @@ Required properties:
   - samsung,pll-clock-frequency: specifies frequency of the oscillator clock
   - #address-cells, #size-cells: should be set respectively to <1> and <0>
     according to DSI host bindings (see MIPI DSI bindings [1])
+  - samsung,burst-clock-frequency: specifies DSI frequency in high-speed burst
+    mode
+  - samsung,esc-clock-frequency: specifies DSI frequency in escape mode
 
 Optional properties:
   - power-domains: a phandle to DSIM power domain node
@@ -29,25 +32,9 @@ Child nodes:
   Should contain DSI peripheral nodes (see MIPI DSI bindings [1]).
 
 Video interfaces:
-  Device node can contain video interface port nodes according to [2].
-  The following are properties specific to those nodes:
-
-  port node inbound:
-    - reg: (required) must be 0.
-  port node outbound:
-    - reg: (required) must be 1.
-
-  endpoint node connected from mic node (reg = 0):
-    - remote-endpoint: specifies the endpoint in mic node. This node is required
-                      for Exynos5433 mipi dsi. So mic can access to panel node
-                      throughout this dsi node.
-  endpoint node connected to panel node (reg = 1):
-    - remote-endpoint: specifies the endpoint in panel node. This node is
-                      required in all kinds of exynos mipi dsi to represent
-                      the connection between mipi dsi and panel.
-    - samsung,burst-clock-frequency: specifies DSI frequency in high-speed burst
-      mode
-    - samsung,esc-clock-frequency: specifies DSI frequency in escape mode
+  Device node can contain following video interface port nodes according to [2]:
+  0: RGB input,
+  1: DSI output
 
 [1]: Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
 [2]: Documentation/devicetree/bindings/media/video-interfaces.txt
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cbf11627adc17c13d079cc17601de1..7cccc49b6beade0d60adaafc228b3ed8ecfa34e0 100644
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
        - compatible = "gpio-keys";
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7113c0c65ab717bf5d01bb8b00c7d..a4222b6cd2d3b7ffa558a9efee9db8c41ea3d58a 100644
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
diff --git a/MAINTAINERS b/MAINTAINERS
index 94c1fef3279b4a35ce04767446ecd6b44ce4fe83..0c45d35626cfb8eef720f2c293b7e66d900c6d03 100644
@@ -9721,13 +9721,6 @@ Q:       http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:     linux-pci@vger.kernel.org
-S:     Supported
-F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:     drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:     Jessica Yu <jeyu@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10957,7 +10950,7 @@ M:      Willy Tarreau <willy@haproxy.com>
 M:     Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:     Odd Fixes
 F:     Documentation/auxdisplay/lcd-panel-cgram.txt
-F:     drivers/misc/panel.c
+F:     drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:     Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11145,6 +11138,13 @@ F:     include/uapi/linux/switchtec_ioctl.h
 F:     include/linux/switchtec.h
 F:     drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:     drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:     Jason Cooper <jason@lakedaemon.net>
@@ -11211,8 +11211,14 @@ F:     tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:     Russell Currey <ruscur@russell.cc>
+M:     Sam Bobroff <sbobroff@linux.ibm.com>
+M:     Oliver O'Halloran <oohall@gmail.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
+F:     Documentation/PCI/pci-error-recovery.txt
+F:     drivers/pci/pcie/aer.c
+F:     drivers/pci/pcie/dpc.c
+F:     drivers/pci/pcie/err.c
 F:     Documentation/powerpc/eeh-pci-error-recovery.txt
 F:     arch/powerpc/kernel/eeh*.c
 F:     arch/powerpc/platforms/*/eeh*.c
diff --git a/Makefile b/Makefile
index 0c90c435497921f581a04c56b5c73d51f4df7383..6c3da3e10f07c5a3f94d4ea3efa49d0287aadcc6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d739f0b7268803d9c1a25e92e947f..1fffbba8d6a5e64a5fefdb06a6ecab29f4ec66e5 100644
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c0927f71e1a89937526f3e1f2d7fd241..2d8fc8c9da7a1f210816bd9734c3d8453d8fc04e 100644
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
        mfspr   r10,SPRN_HSRR1
-       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
        andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-       addi    r11,r11,-4              /* HSRR0 is next instruction */
        bne+    denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
        XVCPSGNDP32(32)
 denorm_done:
+       mfspr   r11,SPRN_HSRR0
+       subi    r11,r11,4
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe76ba7847ceb74b69e16cc53ac4178..7716374786bd97c7e56390ea587e967d75c68a2e 100644
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
-       /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
 
+       /*
+        * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+        * clobbered by an exception once we turn on MSR_RI below.
+        */
+       ld      r11, PACATMSCRATCH(r13)
+       std     r11, GPR1(r1)
+
+       /*
+        * Store r13 away so we can free up the scratch SPR for the SLB fault
+        * handler (needed once we start accessing the thread_struct).
+        */
+       GET_SCRATCH0(r11)
+       std     r11, GPR13(r1)
+
        /* Reset MSR RI so we can take SLB faults again */
        li      r11, MSR_RI
        mtmsrd  r11, 1
 
+       /* Store the PPR in r11 and reset to decent value */
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
        SAVE_GPR(8, r7)                         /* user r8 */
        SAVE_GPR(9, r7)                         /* user r9 */
        SAVE_GPR(10, r7)                        /* user r10 */
-       ld      r3, PACATMSCRATCH(r13)          /* user r1 */
+       ld      r3, GPR1(r1)                    /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
        ld      r5, GPR11(r1)                   /* user r11 */
        ld      r6, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(8)                         /* user r13 */
+       ld      r8, GPR13(r1)                   /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
        std     r5, GPR11(r7)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13307f5fc739e899274704faa89a15..d05c8af4ac51fe4c696469664e4bf6ceb9dd7d64 100644
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
        addc    r0, r8, r9
        ld      r10, 0(r4)
        ld      r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       rotldi  r5, r5, 8
+#endif
        adde    r0, r0, r10
        add     r5, r5, r7
        adde    r0, r0, r11
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5e55346afbbe939987b0a0642aa7d..6ae2777c220d4062860ecd64c567d754bf00da8a 100644
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
        int err;
 
+       /* Make sure we aren't patching a freed init section */
+       if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+               pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+               return 0;
+       }
+
        __put_user_size(instr, patch_addr, 4, err);
        if (err)
                return err;
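
The new check above is the whole fix: once free_initmem() has released the init sections, an attempt to patch an instruction inside them is silently skipped instead of scribbling on reused pages. A stand-alone C sketch of the same guard, where init_section[], in_init_section() and patch_instruction() are hypothetical stand-ins for the kernel's init area, init_section_contains() and __patch_instruction():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char init_section[64];  /* stand-in for the kernel's init area */
static bool init_mem_is_free;

static bool in_init_section(const void *addr, size_t len)
{
	const unsigned char *p = addr;

	return p >= init_section && p + len <= init_section + sizeof(init_section);
}

static int patch_instruction(void *addr, uint32_t instr)
{
	/* Mirror the hunk: never patch init memory once it has been freed. */
	if (init_mem_is_free && in_init_section(addr, sizeof(instr))) {
		printf("skipping init section patch at %p\n", addr);
		return 0;
	}
	memcpy(addr, &instr, sizeof(instr));
	return 0;
}

int main(void)
{
	patch_instruction(init_section, 0x60000000);  /* patched normally */
	init_mem_is_free = true;                      /* free_initmem() ran */
	patch_instruction(init_section, 0x60000000);  /* now skipped */
	return 0;
}
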
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611898f012e2cc8f300a7112715c351..04ccb274a6205bba58357d5897105ada90f81c0f 100644
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
+       init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a0ee5494c92b637bcc64604c7cba3..59d07bd5374a968f938135b6c0691b0245dd7da0 100644
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
        int new_nid;
 
        /* Use associativity from first thread for all siblings */
-       vphn_get_associativity(cpu, associativity);
+       if (vphn_get_associativity(cpu, associativity))
+               return cpu_to_node(cpu);
+
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-       mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+       if (vphn_enabled)
+               mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435435cbf703a477e4bb60f6181a058..b271b283c785e3a07589ea81c6b8e40e7def5a69 100644
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
         * Since any pkey can be used for data or execute, we will just treat
         * all keys as equal and track them as one entity.
         */
-       pkeys_total = be32_to_cpu(vals[0]);
+       pkeys_total = vals[0];
        pkeys_devtree_defined = true;
 }
 
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8dffaba711faf55d13b2baa6c6a965..fe9691040f54c26561949c469738f277c90069e6 100644
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
 
-       if ((level_shift - 3) * levels + page_shift >= 60)
+       if ((level_shift - 3) * levels + page_shift >= 55)
                return -EINVAL;
 
        /* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..c9fecd1
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907ffd8a166c09e98c1594627aff4ac..a480356e0ed886006749d69488c5af625828bf6d 100644
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
        push    %ebx
        push    %ecx
        push    %edx
-       push    %edi
-
-       /*
-        * RIP-relative addressing is needed to access the encryption bit
-        * variable. Since we are running in 32-bit mode we need this call/pop
-        * sequence to get the proper relative addressing.
-        */
-       call    1f
-1:     popl    %edi
-       subl    $1b, %edi
-
-       movl    enc_bit(%edi), %eax
-       cmpl    $0, %eax
-       jge     .Lsev_exit
 
        /* Check if running under a hypervisor */
        movl    $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
 
        movl    %ebx, %eax
        andl    $0x3f, %eax             /* Return the encryption bit location */
-       movl    %eax, enc_bit(%edi)
        jmp     .Lsev_exit
 
 .Lno_sev:
        xor     %eax, %eax
-       movl    %eax, enc_bit(%edi)
 
 .Lsev_exit:
-       pop     %edi
        pop     %edx
        pop     %ecx
        pop     %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
        .data
-enc_bit:
-       .int    0xffffffff
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        .balign 8
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6ea383a99f1cd76d6917af0d2a1ba6..41317c50a44628e9ef4930e9f17ad6d8297c9190 100644
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
        /*
         * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-        * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-        * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-        * synchronize_rcu to ensure all of the users go out of the critical
-        * section below and see zeroed q_usage_counter.
+        * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+        * to avoid race with it.
         */
-       rcu_read_lock();
-       if (percpu_ref_is_zero(&q->q_usage_counter)) {
-               rcu_read_unlock();
+       if (!percpu_ref_tryget(&q->q_usage_counter))
                return;
-       }
 
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
-       rcu_read_unlock();
+       blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
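
The rework above drops the RCU/percpu_ref_is_zero() dance: the iterator now simply tries to take a queue usage reference and bails out if the queue is already frozen, releasing it with blk_queue_exit() when done. A minimal C11 model of that try-get discipline (struct queue_ref, ref_tryget() and ref_put() are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_ref {
	_Atomic long count;	/* > 0 while the queue is live */
};

/* Model of percpu_ref_tryget(): take a reference only while one is held. */
static bool ref_tryget(struct queue_ref *r)
{
	long c = atomic_load(&r->count);

	while (c > 0)
		if (atomic_compare_exchange_weak(&r->count, &c, c + 1))
			return true;
	return false;
}

/* Model of blk_queue_exit(): drop the reference taken above. */
static void ref_put(struct queue_ref *r)
{
	atomic_fetch_sub(&r->count, 1);
}

int main(void)
{
	struct queue_ref q = { .count = 1 };

	if (ref_tryget(&q)) {
		/* ... safe to walk hctx/tags here ... */
		ref_put(&q);
	}

	atomic_store(&q.count, 0);	/* queue frozen and drained */
	printf("tryget after freeze: %d\n", ref_tryget(&q));	/* prints 0 */
	return 0;
}
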
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72716ce2e31c280d7fd43d5c6e61e9..e3c39ea8e17b04b0787e53959cd4f68cb1a43f3d 100644
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
-                               trace_block_unplug(this_q, depth, from_schedule);
+                               trace_block_unplug(this_q, depth, !from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
-               trace_block_unplug(this_q, depth, from_schedule);
+               trace_block_unplug(this_q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5dd8ffab230a3f3ae74bfa6935233..fae58b2f906fc5e0352c3f3194780abe13369784 100644
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
-       if (q->nr_sorted && printed++ < 10) {
+       if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900ddc07ff45d240f0ae290ff408b6cd..429d20131c7e228f81bcbd6dd72ed8a21290c14f 100644
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
                        list_del(&gnt_list_entry->node);
                        gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                        rinfo->persistent_gnts_c--;
-                       __free_page(gnt_list_entry->page);
-                       kfree(gnt_list_entry);
+                       gnt_list_entry->gref = GRANT_INVALID_REF;
+                       list_add_tail(&gnt_list_entry->node, &rinfo->grants);
                }
 
                spin_unlock_irqrestore(&rinfo->ring_lock, flags);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74fb4f9da1f369a968df457064315e2..2fab18fae4fcbbeeb44cfbc4f2517258cd3dc505 100644
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        data->base = of_iomap(node, 0);
        if (!data->base) {
                pr_err("Could not map PIT address\n");
-               return -ENXIO;
+               ret = -ENXIO;
+               goto exit;
        }
 
        data->mck = of_clk_get(node, 0);
        if (IS_ERR(data->mck)) {
                pr_err("Unable to get mck clk\n");
-               return PTR_ERR(data->mck);
+               ret = PTR_ERR(data->mck);
+               goto exit;
        }
 
        ret = clk_prepare_enable(data->mck);
        if (ret) {
                pr_err("Unable to enable mck\n");
-               return ret;
+               goto exit;
        }
 
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
                pr_err("Unable to get IRQ from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
 
        /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        ret = clocksource_register_hz(&data->clksrc, pit_rate);
        if (ret) {
                pr_err("Failed to register clocksource\n");
-               return ret;
+               goto exit;
        }
 
        /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                          "at91_tick", data);
        if (ret) {
                pr_err("Unable to setup IRQ\n");
-               return ret;
+               clocksource_unregister(&data->clksrc);
+               goto exit;
        }
 
        /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        clockevents_register_device(&data->clkevt);
 
        return 0;
+
+exit:
+       kfree(data);
+       return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                       at91sam926x_pit_dt_init);
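
The at91sam926x hunks convert every failure after the allocation into a goto exit that frees data, and make a late IRQ failure also unregister the clocksource it had just registered. A runnable sketch of this single-exit unwind idiom, with pit_init() and its fields invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pit_data {
	int irq;
};

static int pit_init(void)
{
	struct pit_data *data;
	int ret;

	data = malloc(sizeof(*data));
	if (!data)
		return -ENOMEM;

	data->irq = 0;			/* pretend the IRQ lookup failed */
	if (!data->irq) {
		fprintf(stderr, "Unable to get IRQ\n");
		ret = -EINVAL;
		goto exit;		/* every late failure funnels here */
	}

	return 0;

exit:
	free(data);			/* single cleanup path, like kfree(data) */
	return ret;
}

int main(void)
{
	printf("pit_init() = %d\n", pit_init());
	return 0;
}
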
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab2242ed844a143f3ce6706985685c..cf93f6419b5142e397747be406138dacf3278a5c 100644
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
        cr &= ~fttmr010->t1_enable_val;
        writel(cr, fttmr010->base + TIMER_CR);
 
-       /* Setup the match register forward/backward in time */
-       cr = readl(fttmr010->base + TIMER1_COUNT);
-       if (fttmr010->count_down)
-               cr -= cycles;
-       else
-               cr += cycles;
-       writel(cr, fttmr010->base + TIMER1_MATCH1);
+       if (fttmr010->count_down) {
+               /*
+                * ASPEED Timer Controller will load TIMER1_LOAD register
+                * into TIMER1_COUNT register when the timer is re-enabled.
+                */
+               writel(cycles, fttmr010->base + TIMER1_LOAD);
+       } else {
+               /* Setup the match register forward in time */
+               cr = readl(fttmr010->base + TIMER1_COUNT);
+               writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+       }
 
        /* Start */
        cr = readl(fttmr010->base + TIMER_CR);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43372ee96e64bb9b93d6b21b5288f7..6949a9113dbb417aec69b444f365746220943e16 100644
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
                return -ENXIO;
        }
 
+       if (!of_machine_is_compatible("ti,am43"))
+               ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
        ti_32k_timer.counter = ti_32k_timer.base;
 
        /*
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5bd5eed0c627592c3a11c29e121da..2a3675c24032bc8059c4c591698d6a7b5218cf1d 100644
@@ -44,7 +44,7 @@ enum _msm8996_version {
 
 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
        u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
        platform_device_unregister(kryo_cpufreq_pdev);
        platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index bbe4d72ca105b001e36b1d09d382ee9e3a89ee7c..948806e57cee33f74024adb442f398579319b89d 100644
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 
+static const struct address_space_operations dev_dax_aops = {
+       .set_page_dirty         = noop_set_page_dirty,
+       .invalidatepage         = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
        struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
        dev_dbg(&dev_dax->dev, "trace\n");
        inode->i_mapping = __dax_inode->i_mapping;
        inode->i_mapping->host = __dax_inode;
+       inode->i_mapping->a_ops = &dev_dax_aops;
        filp->f_mapping = inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        filp->private_data = dev_dax;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 736b7e67e4ec7068bfc2ce261f073a8874d09e8d..4385f00e1d055583df06ae4b7be0e6a6a68adc51 100644
@@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC
          is 100. Typical values for double buffering will be 200,
          triple buffering 300.
 
+config DRM_FBDEV_LEAK_PHYS_SMEM
+       bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+       depends on DRM_FBDEV_EMULATION && EXPERT
+       default n
+       help
+         In order to keep user-space compatibility, we want in certain
+         use-cases to keep leaking the fbdev physical address to the
+         user-space program handling the fbdev buffer.
+         This affects, not only, Amlogic, Allwinner or Rockchip devices
+         with ARM Mali GPUs using an userspace Blob.
+         This option is not supported by upstream developers and should be
+         removed as soon as possible and be considered as a broken and
+         legacy behaviour from a modern fbdev device driver.
+
+         Please send any bug reports when using this to your proprietary
+         software vendor that requires this.
+
+         If in doubt, say "N" or spread the word to your closed source
+         library vendor.
+
 config DRM_LOAD_EDID_FIRMWARE
        bool "Allow to specify an EDID data set instead of probing for it"
        depends on DRM
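
The help text above describes a deliberate, opt-in compatibility hole. A compile-time-gated sketch of the shape such an option takes in code (CONFIG_LEAK_DEMO, leak_address and expose_smem() are hypothetical; the real wiring appears in the drm_fb_helper.c hunk below):

#include <stdbool.h>
#include <stdio.h>

/* Build with -DCONFIG_LEAK_DEMO to model enabling the Kconfig option;
 * without it the flag and the leaking branch vanish at compile time. */
#ifdef CONFIG_LEAK_DEMO
static bool leak_address;		/* would be an unsafe module parameter */
#endif

static unsigned long expose_smem(unsigned long phys)
{
#ifdef CONFIG_LEAK_DEMO
	if (leak_address)
		return phys;		/* legacy behaviour: hand out the address */
#endif
	(void)phys;
	return 0;			/* default: hide it from user space */
}

int main(void)
{
	printf("reported smem_start: %#lx\n", expose_smem(0x80000000UL));
	return 0;
}
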
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36e4a1b192a17b274bbd4fe38c00b6..5f3f540738187c6db03a7975bced71ea4163c9e0 100644
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
        int i;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
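
This VCE hunk and the VCN one that follows apply the same reordering: cancel the idle work before any early return, so deferred work can no longer fire against a device the suspend path has already decided to leave alone. A toy model of why the ordering matters (suspend() and cancel_idle_work() are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool idle_work_pending = true;	/* work queued before suspend */

static void cancel_idle_work(void)
{
	idle_work_pending = false;
}

static int suspend(bool fw_buffer_allocated)
{
	cancel_idle_work();		/* moved ahead of the early return */

	if (!fw_buffer_allocated)
		return 0;		/* old code returned here, work still pending */

	/* ... save firmware state, unpin buffers ... */
	return 0;
}

int main(void)
{
	suspend(false);
	printf("idle work still pending after suspend: %s\n",
	       idle_work_pending ? "yes (bug)" : "no");
	return 0;
}
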
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 2a2eb0143f485c7ec901068970d38529c6894a04..64e527b1c02f01d4bec2f3ee9b27e7502e65b160 100644
@@ -163,11 +163,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
        unsigned size;
        void *ptr;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->vcn.vcpu_bo == NULL)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vcn.idle_work);
-
        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 715422bb30db6fb1f3dd89c8fa55cd8688378b92..6a2342d727426133eb1bf29a6be8891370c8a562 100644
@@ -718,6 +718,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
        return NULL;
 }
 
+static void emulated_link_detect(struct dc_link *link)
+{
+       struct dc_sink_init_data sink_init_data = { 0 };
+       struct display_sink_capability sink_caps = { 0 };
+       enum dc_edid_status edid_status;
+       struct dc_context *dc_ctx = link->ctx;
+       struct dc_sink *sink = NULL;
+       struct dc_sink *prev_sink = NULL;
+
+       link->type = dc_connection_none;
+       prev_sink = link->local_sink;
+
+       if (prev_sink != NULL)
+               dc_sink_retain(prev_sink);
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_HDMI_TYPE_A: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_DUAL_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_LVDS: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_LVDS;
+               break;
+       }
+
+       case SIGNAL_TYPE_EDP: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_EDP;
+               break;
+       }
+
+       case SIGNAL_TYPE_DISPLAY_PORT: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+               break;
+       }
+
+       default:
+               DC_ERROR("Invalid connector type! signal:%d\n",
+                       link->connector_signal);
+               return;
+       }
+
+       sink_init_data.link = link;
+       sink_init_data.sink_signal = sink_caps.signal;
+
+       sink = dc_sink_create(&sink_init_data);
+       if (!sink) {
+               DC_ERROR("Failed to create sink!\n");
+               return;
+       }
+
+       link->local_sink = sink;
+
+       edid_status = dm_helpers_read_local_edid(
+                       link->ctx,
+                       link,
+                       sink);
+
+       if (edid_status != EDID_OK)
+               DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -731,6 +812,7 @@ static int dm_resume(void *handle)
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
        int ret;
        int i;
 
@@ -761,7 +843,13 @@ static int dm_resume(void *handle)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
-               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+               if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none)
+                       emulated_link_detect(aconnector->dc_link);
+               else
+                       dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 
                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;
@@ -1010,6 +1098,7 @@ static void handle_hpd_irq(void *param)
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /*
         * In case of failure or MST no need to update connector status or notify the OS
@@ -1020,7 +1109,21 @@ static void handle_hpd_irq(void *param)
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
-       if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+       if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+
+       if (aconnector->base.force && new_connection_type == dc_connection_none) {
+               emulated_link_detect(aconnector->dc_link);
+
+
+               drm_modeset_lock_all(dev);
+               dm_restore_drm_connector_state(dev, connector);
+               drm_modeset_unlock_all(dev);
+
+               if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+                       drm_kms_helper_hotplug_event(dev);
+
+       } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -1120,6 +1223,7 @@ static void handle_hpd_rx_irq(void *param)
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1132,7 +1236,24 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
-               if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+               if (!dc_link_detect_sink(dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(dc_link);
+
+                       if (aconnector->fake_enable)
+                               aconnector->fake_enable = false;
+
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+                       drm_modeset_lock_all(dev);
+                       dm_restore_drm_connector_state(dev, connector);
+                       drm_modeset_unlock_all(dev);
+
+                       drm_kms_helper_hotplug_event(dev);
+               } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 
                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;
@@ -1529,6 +1650,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t total_overlay_planes, total_primary_planes;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1595,7 +1717,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                link = dc_get_link_at_index(dm->dc, i);
 
-               if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+               if (!dc_link_detect_sink(link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(link);
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+               } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }
@@ -2638,7 +2767,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        if (dm_state && dm_state->freesync_capable)
                stream->ignore_msa_timing_param = true;
 finish:
-       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
                dc_sink_release(sink);
 
        return stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9f9503a9b9aad0ed7439e3af78b9553c0ae02db2..fb04a4ad141fdb68f68a747f6c4474a15e7da8a2 100644
@@ -198,7 +198,7 @@ static bool program_hpd_filter(
        return result;
 }
 
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
        uint32_t is_hpd_high = 0;
        struct gpio *hpd_pin;
@@ -612,7 +612,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
        if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
                return false;
 
-       if (false == detect_sink(link, &new_connection_type)) {
+       if (false == dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 438fb35d87b81da0e57285e3915f009346948970..3bfdccceb524427c40ac0a2b063710c2bb66bbe3 100644
@@ -216,6 +216,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
 
 bool dc_link_is_dp_sink_present(struct dc_link *link);
 
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 6b7cccc486d891ed1b7dc17526b8ab7483755271..b75ede5f84f76837960463387a90ca35aa7ac62a 100644
@@ -2537,7 +2537,7 @@ static void pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c656b010e16ab490286801bb94bfb4..d6db3dbd90153ba4a3f9511eb494552b143b4255 100644
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
        const struct dc_state *context,
        struct dm_pp_display_configuration *pp_display_cfg);
 
-void dce110_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a618298a6bdc20df629f1c494d13468ae..eb0f5f9a973b9b2f79793023d8f1cdf21aae787c 100644
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
        dh_data->dchub_info_valid = false;
 }
 
-static void dce120_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
-{
-       if (context->stream_count <= 0)
-               return;
-
-       dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
        /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
        dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
        dc->hwss.update_dchub = dce120_update_dchub;
-       dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a504a5e05676d81fbc92308645fec0dfcdce9a2b..9b111e8468477fa492e4e5616bc6a9701b0ef6b8 100644
@@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
                 "Overallocation of the fbdev buffer (%) [default="
                 __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
 
+/*
+ * In order to keep user-space compatibility, we want in certain use-cases
+ * to keep leaking the fbdev physical address to the user-space program
+ * handling the fbdev buffer.
+ * This is a bad habit essentially kept into closed source opengl driver
+ * that should really be moved into open-source upstream projects instead
+ * of using legacy physical addresses in user space to communicate with
+ * other out-of-tree kernel modules.
+ *
+ * This module_param *should* be removed as soon as possible and be
+ * considered as a broken and legacy behaviour from a modern fbdev device.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+static bool drm_leak_fbdev_smem = false;
+module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
+MODULE_PARM_DESC(fbdev_emulation,
+                "Allow unsafe leaking fbdev physical smem address [default=false]");
+#endif
+
 static LIST_HEAD(kernel_fb_helper_list);
 static DEFINE_MUTEX(kernel_fb_helper_lock);
 
@@ -2670,8 +2689,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
 
        info = fb_helper->fbdev;
        info->var.pixclock = 0;
-       /* don't leak any physical addresses to userspace */
-       info->flags |= FBINFO_HIDE_SMEM_START;
+       /* Shamelessly allow physical address leaking to userspace */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+       if (!drm_leak_fbdev_smem)
+#endif
+               /* don't leak any physical addresses to userspace */
+               info->flags |= FBINFO_HIDE_SMEM_START;
 
        /* Need to drop locks to avoid recursive deadlock in
         * register_framebuffer. This is ok because the only thing left to do is
@@ -3081,6 +3104,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        fbi->screen_size = fb->height * fb->pitches[0];
        fbi->fix.smem_len = fbi->screen_size;
        fbi->screen_buffer = buffer->vaddr;
+       /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+       if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+               fbi->fix.smem_start =
+                       page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
        strcpy(fbi->fix.id, "DRM emulated");
 
        drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 0db486d10d1c2040d9ed97551b662f38fd32076c..c33f95e08e1b8bc8c6e987a95bc4ff011215618f 100644
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
-       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-       if (!panel->link) {
-               dev_err(panel->dev, "failed to link panel to %s\n",
-                       dev_name(connector->dev->dev));
-               return -EINVAL;
-       }
-
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-       device_link_del(panel->link);
-
        panel->connector = NULL;
        panel->drm = NULL;
 
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5bcb3ef9b2566ec8fb44d0deacdb55ddbf15dbbc..5c2091dbd230307bcd8825674b0206f0433ec9d1 100644
@@ -113,6 +113,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
        int ret;
 
+       WARN_ON(*fence);
+
        *fence = drm_syncobj_fence_get(syncobj);
        if (*fence)
                return 1;
@@ -717,6 +719,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i) {
+                       if (entries[i].fence)
+                               continue;
+
                        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                              &entries[i].fence,
                                                              &entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571f245a1ba5ad677bb0566d95ca207..83c1f46670bfea9dcbe95e42da598c8665576b03 100644
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
 
-       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
        if (!dev->platform_data) {
                struct device_node *core_node;
 
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-               pdev = platform_device_register_simple("etnaviv", -1,
-                                                      NULL, 0);
-               if (IS_ERR(pdev)) {
-                       ret = PTR_ERR(pdev);
+
+               pdev = platform_device_alloc("etnaviv", -1);
+               if (!pdev) {
+                       ret = -ENOMEM;
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+               /*
+                * Apply the same DMA configuration to the virtual etnaviv
+                * device as the GPU we found. This assumes that all Vivante
+                * GPUs in the system share the same DMA constraints.
+                */
+               of_dma_configure(&pdev->dev, np, true);
+
+               ret = platform_device_add(pdev);
+               if (ret) {
+                       platform_device_put(pdev);
                        of_node_put(np);
                        goto unregister_platform_driver;
                }
+
                etnaviv_drm = pdev;
                of_node_put(np);
                break;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b599f74692e5c60b79f933fc52f8201ff276e7ea..6f76baf4550ada0fc6a75990abde632914f656a3 100644
@@ -149,37 +149,15 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private;
-
-       if (!drm_dev)
-               return 0;
-
-       private = drm_dev->dev_private;
-
-       drm_kms_helper_poll_disable(drm_dev);
-       exynos_drm_fbdev_suspend(drm_dev);
-       private->suspend_state = drm_atomic_helper_suspend(drm_dev);
-       if (IS_ERR(private->suspend_state)) {
-               exynos_drm_fbdev_resume(drm_dev);
-               drm_kms_helper_poll_enable(drm_dev);
-               return PTR_ERR(private->suspend_state);
-       }
 
-       return 0;
+       return  drm_mode_config_helper_suspend(drm_dev);
 }
 
 static void exynos_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private;
-
-       if (!drm_dev)
-               return;
 
-       private = drm_dev->dev_private;
-       drm_atomic_helper_resume(drm_dev, private->suspend_state);
-       exynos_drm_fbdev_resume(drm_dev);
-       drm_kms_helper_poll_enable(drm_dev);
+       drm_mode_config_helper_resume(drm_dev);
 }
 
 static const struct dev_pm_ops exynos_drm_pm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c737c4bd2c19b3f2cf141fadc7e1b1d4344b88db..ec9604f1272b50d12b44a9f759693dc4a8eaebf6 100644
@@ -92,6 +92,8 @@ struct exynos_drm_plane {
 #define EXYNOS_DRM_PLANE_CAP_SCALE     (1 << 1)
 #define EXYNOS_DRM_PLANE_CAP_ZPOS      (1 << 2)
 #define EXYNOS_DRM_PLANE_CAP_TILE      (1 << 3)
+#define EXYNOS_DRM_PLANE_CAP_PIX_BLEND (1 << 4)
+#define EXYNOS_DRM_PLANE_CAP_WIN_BLEND (1 << 5)
 
 /*
  * Exynos DRM plane configuration structure.
@@ -195,7 +197,6 @@ struct drm_exynos_file_private {
  */
 struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
-       struct drm_atomic_state *suspend_state;
 
        struct device *g2d_dev;
        struct device *dma_dev;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 781b82c2c579b2707947c0c732001f6e794d9713..07af7758066db47c866a86a2be8fdfe5386421a5 100644
@@ -255,6 +255,7 @@ struct exynos_dsi {
        struct mipi_dsi_host dsi_host;
        struct drm_connector connector;
        struct drm_panel *panel;
+       struct drm_bridge *out_bridge;
        struct device *dev;
 
        void __iomem *reg_base;
@@ -279,7 +280,7 @@ struct exynos_dsi {
        struct list_head transfer_list;
 
        const struct exynos_dsi_driver_data *driver_data;
-       struct device_node *bridge_node;
+       struct device_node *in_bridge_node;
 };
 
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
@@ -1382,29 +1383,37 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
                return;
 
        pm_runtime_get_sync(dsi->dev);
-
        dsi->state |= DSIM_STATE_ENABLED;
 
-       ret = drm_panel_prepare(dsi->panel);
-       if (ret < 0) {
-               dsi->state &= ~DSIM_STATE_ENABLED;
-               pm_runtime_put_sync(dsi->dev);
-               return;
+       if (dsi->panel) {
+               ret = drm_panel_prepare(dsi->panel);
+               if (ret < 0)
+                       goto err_put_sync;
+       } else {
+               drm_bridge_pre_enable(dsi->out_bridge);
        }
 
        exynos_dsi_set_display_mode(dsi);
        exynos_dsi_set_display_enable(dsi, true);
 
-       ret = drm_panel_enable(dsi->panel);
-       if (ret < 0) {
-               dsi->state &= ~DSIM_STATE_ENABLED;
-               exynos_dsi_set_display_enable(dsi, false);
-               drm_panel_unprepare(dsi->panel);
-               pm_runtime_put_sync(dsi->dev);
-               return;
+       if (dsi->panel) {
+               ret = drm_panel_enable(dsi->panel);
+               if (ret < 0)
+                       goto err_display_disable;
+       } else {
+               drm_bridge_enable(dsi->out_bridge);
        }
 
        dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+       return;
+
+err_display_disable:
+       exynos_dsi_set_display_enable(dsi, false);
+       drm_panel_unprepare(dsi->panel);
+
+err_put_sync:
+       dsi->state &= ~DSIM_STATE_ENABLED;
+       pm_runtime_put(dsi->dev);
 }
 
 static void exynos_dsi_disable(struct drm_encoder *encoder)
@@ -1417,11 +1426,11 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
        dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
 
        drm_panel_disable(dsi->panel);
+       drm_bridge_disable(dsi->out_bridge);
        exynos_dsi_set_display_enable(dsi, false);
        drm_panel_unprepare(dsi->panel);
-
+       drm_bridge_post_disable(dsi->out_bridge);
        dsi->state &= ~DSIM_STATE_ENABLED;
-
        pm_runtime_put_sync(dsi->dev);
 }
 
@@ -1499,7 +1508,30 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
                                  struct mipi_dsi_device *device)
 {
        struct exynos_dsi *dsi = host_to_dsi(host);
-       struct drm_device *drm = dsi->connector.dev;
+       struct drm_encoder *encoder = &dsi->encoder;
+       struct drm_device *drm = encoder->dev;
+       struct drm_bridge *out_bridge;
+
+       out_bridge = of_drm_find_bridge(device->dev.of_node);
+       if (out_bridge) {
+               drm_bridge_attach(encoder, out_bridge, NULL);
+               dsi->out_bridge = out_bridge;
+               encoder->bridge = NULL;
+       } else {
+               int ret = exynos_dsi_create_connector(encoder);
+
+               if (ret) {
+                       DRM_ERROR("failed to create connector ret = %d\n", ret);
+                       drm_encoder_cleanup(encoder);
+                       return ret;
+               }
+
+               dsi->panel = of_drm_find_panel(device->dev.of_node);
+               if (dsi->panel) {
+                       drm_panel_attach(dsi->panel, &dsi->connector);
+                       dsi->connector.status = connector_status_connected;
+               }
+       }
 
        /*
         * This is a temporary solution and should be handled in a more generic way.
@@ -1518,14 +1550,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
        dsi->lanes = device->lanes;
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;
-       dsi->panel = of_drm_find_panel(device->dev.of_node);
-       if (IS_ERR(dsi->panel))
-               dsi->panel = NULL;
-
-       if (dsi->panel) {
-               drm_panel_attach(dsi->panel, &dsi->connector);
-               dsi->connector.status = connector_status_connected;
-       }
        exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
                        !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
 
@@ -1541,19 +1565,21 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
                                  struct mipi_dsi_device *device)
 {
        struct exynos_dsi *dsi = host_to_dsi(host);
-       struct drm_device *drm = dsi->connector.dev;
-
-       mutex_lock(&drm->mode_config.mutex);
+       struct drm_device *drm = dsi->encoder.dev;
 
        if (dsi->panel) {
+               mutex_lock(&drm->mode_config.mutex);
                exynos_dsi_disable(&dsi->encoder);
                drm_panel_detach(dsi->panel);
                dsi->panel = NULL;
                dsi->connector.status = connector_status_disconnected;
+               mutex_unlock(&drm->mode_config.mutex);
+       } else {
+               if (dsi->out_bridge->funcs->detach)
+                       dsi->out_bridge->funcs->detach(dsi->out_bridge);
+               dsi->out_bridge = NULL;
        }
 
-       mutex_unlock(&drm->mode_config.mutex);
-
        if (drm->mode_config.poll_enabled)
                drm_kms_helper_hotplug_event(drm);
 
@@ -1634,7 +1660,7 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
        if (ret < 0)
                return ret;
 
-       dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
+       dsi->in_bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
 
        return 0;
 }
@@ -1645,7 +1671,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
        struct drm_device *drm_dev = data;
-       struct drm_bridge *bridge;
+       struct drm_bridge *in_bridge;
        int ret;
 
        drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
@@ -1657,17 +1683,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
        if (ret < 0)
                return ret;
 
-       ret = exynos_dsi_create_connector(encoder);
-       if (ret) {
-               DRM_ERROR("failed to create connector ret = %d\n", ret);
-               drm_encoder_cleanup(encoder);
-               return ret;
-       }
-
-       if (dsi->bridge_node) {
-               bridge = of_drm_find_bridge(dsi->bridge_node);
-               if (bridge)
-                       drm_bridge_attach(encoder, bridge, NULL);
+       if (dsi->in_bridge_node) {
+               in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
+               if (in_bridge)
+                       drm_bridge_attach(encoder, in_bridge, NULL);
        }
 
        return mipi_dsi_host_register(&dsi->dsi_host);
@@ -1786,7 +1805,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
 {
        struct exynos_dsi *dsi = platform_get_drvdata(pdev);
 
-       of_node_put(dsi->bridge_node);
+       of_node_put(dsi->in_bridge_node);
 
        pm_runtime_disable(&pdev->dev);
 
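
The attach/detach rework above makes the DSI sink dual-mode: look for a downstream bridge on the peer device's OF node first, and only fall back to creating a connector and binding a panel. A condensed, hypothetical sketch of that selection pattern (of_drm_find_panel() returns an ERR_PTR on failure in this era, as the removed host-attach lines show):

    #include <linux/of.h>
    #include <drm/drm_bridge.h>
    #include <drm/drm_panel.h>

    /* Hypothetical helper showing the bridge-first, panel-fallback pattern. */
    static int sketch_attach_sink(struct drm_encoder *encoder,
                                  struct device_node *np,
                                  struct drm_bridge **bridge,
                                  struct drm_panel **panel)
    {
            *bridge = of_drm_find_bridge(np);
            if (*bridge) {
                    /* A bridge driver claimed the node: chain it behind us. */
                    return drm_bridge_attach(encoder, *bridge, NULL);
            }

            /* No bridge registered for this node: expect a panel instead. */
            *panel = of_drm_find_panel(np);
            if (IS_ERR(*panel)) {
                    int ret = PTR_ERR(*panel);

                    *panel = NULL;
                    return ret;
            }
            return 0;
    }
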
index 132dd52d0ac7f1d2dda11e1607a44aa9495f8cc4..918dd2c822098444c6708761baded3ef95420025 100644 (file)
@@ -270,20 +270,3 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
        private->fb_helper = NULL;
 }
 
-void exynos_drm_fbdev_suspend(struct drm_device *dev)
-{
-       struct exynos_drm_private *private = dev->dev_private;
-
-       console_lock();
-       drm_fb_helper_set_suspend(private->fb_helper, 1);
-       console_unlock();
-}
-
-void exynos_drm_fbdev_resume(struct drm_device *dev)
-{
-       struct exynos_drm_private *private = dev->dev_private;
-
-       console_lock();
-       drm_fb_helper_set_suspend(private->fb_helper, 0);
-       console_unlock();
-}
index b33847223a85506cd94022874ca6da7f82bf9364..6840b6aadbc0892ed844835bc1530f38ae0e86c0 100644 (file)
@@ -19,8 +19,6 @@
 
 int exynos_drm_fbdev_init(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
-void exynos_drm_fbdev_suspend(struct drm_device *drm);
-void exynos_drm_fbdev_resume(struct drm_device *drm);
 
 #else
 
@@ -39,14 +37,6 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
 #define exynos_drm_output_poll_changed (NULL)
 
-static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
-{
-}
-
-static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
-{
-}
-
 #endif
 
 #endif
index 7ba414b52faa940595a028db7fa3e959bc7a58cd..ce15d46bfce8a57dc007e0af5317dd9d1f7385a0 100644 (file)
@@ -448,7 +448,7 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
 }
 
 
-static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
+static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
 {
        u32 cfg;
 
@@ -514,6 +514,9 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
                break;
        }
 
+       if (tiled)
+               cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+
        gsc_write(cfg, GSC_IN_CON);
 }
 
@@ -632,7 +635,7 @@ static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id,
        gsc_src_set_buf_seq(ctx, buf_id, true);
 }
 
-static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
+static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
 {
        u32 cfg;
 
@@ -698,6 +701,9 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
                break;
        }
 
+       if (tiled)
+               cfg |= (GSC_IN_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+
        gsc_write(cfg, GSC_OUT_CON);
 }
 
@@ -1122,11 +1128,11 @@ static int gsc_commit(struct exynos_drm_ipp *ipp,
                return ret;
        }
 
-       gsc_src_set_fmt(ctx, task->src.buf.fourcc);
+       gsc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
        gsc_src_set_transf(ctx, task->transform.rotation);
        gsc_src_set_size(ctx, &task->src);
        gsc_src_set_addr(ctx, 0, &task->src);
-       gsc_dst_set_fmt(ctx, task->dst.buf.fourcc);
+       gsc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
        gsc_dst_set_size(ctx, &task->dst);
        gsc_dst_set_addr(ctx, 0, &task->dst);
        gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
@@ -1200,6 +1206,10 @@ static const unsigned int gsc_formats[] = {
        DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
 };
 
+static const unsigned int gsc_tiled_formats[] = {
+       DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+};
+
 static int gsc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -1207,23 +1217,24 @@ static int gsc_probe(struct platform_device *pdev)
        struct exynos_drm_ipp_formats *formats;
        struct gsc_context *ctx;
        struct resource *res;
-       int ret, i;
+       int num_formats, ret, i, j;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
-       formats = devm_kcalloc(dev,
-                              ARRAY_SIZE(gsc_formats), sizeof(*formats),
-                              GFP_KERNEL);
-       if (!formats)
-               return -ENOMEM;
-
        driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
        ctx->dev = dev;
        ctx->num_clocks = driver_data->num_clocks;
        ctx->clk_names = driver_data->clk_names;
 
+       /* construct formats/limits array */
+       num_formats = ARRAY_SIZE(gsc_formats) + ARRAY_SIZE(gsc_tiled_formats);
+       formats = devm_kcalloc(dev, num_formats, sizeof(*formats), GFP_KERNEL);
+       if (!formats)
+               return -ENOMEM;
+
+       /* linear formats */
        for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) {
                formats[i].fourcc = gsc_formats[i];
                formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
@@ -1231,8 +1242,19 @@ static int gsc_probe(struct platform_device *pdev)
                formats[i].limits = driver_data->limits;
                formats[i].num_limits = driver_data->num_limits;
        }
+
+       /* tiled formats */
+       for (j = i, i = 0; i < ARRAY_SIZE(gsc_tiled_formats); j++, i++) {
+               formats[j].fourcc = gsc_tiled_formats[i];
+               formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;
+               formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
+                                 DRM_EXYNOS_IPP_FORMAT_DESTINATION;
+               formats[j].limits = driver_data->limits;
+               formats[j].num_limits = driver_data->num_limits;
+       }
+
        ctx->formats = formats;
-       ctx->num_formats = ARRAY_SIZE(gsc_formats);
+       ctx->num_formats = num_formats;
 
        /* clock control */
        for (i = 0; i < ctx->num_clocks; i++) {
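
gsc_commit() above forwards the buffer's format modifier as the new tiled flag, implicitly narrowing the u64 modifier to bool at those call sites. That works because DRM_FORMAT_MOD_SAMSUNG_16_16_TILE is the only non-linear modifier the driver registers (via gsc_tiled_formats), so nonzero means tiled; an explicit sketch of the implied check:

    #include <drm/drm_fourcc.h>

    /* Illustration only: with a single registered modifier, tiled vs. linear
     * reduces to a nonzero test, which passing buf.modifier as bool relies on. */
    static bool sketch_is_tiled(u64 modifier)
    {
            return modifier == DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;
    }
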
index dba29aec59b44d873df58555ece705b0d4de6d74..df0508e0e49e0e4f7b60c08dfc0230b50f8a5740 100644 (file)
@@ -131,16 +131,14 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
 
        if (plane->state) {
                exynos_state = to_exynos_plane_state(plane->state);
-               if (exynos_state->base.fb)
-                       drm_framebuffer_put(exynos_state->base.fb);
+               __drm_atomic_helper_plane_destroy_state(plane->state);
                kfree(exynos_state);
                plane->state = NULL;
        }
 
        exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
        if (exynos_state) {
-               plane->state = &exynos_state->base;
-               plane->state->plane = plane;
+               __drm_atomic_helper_plane_reset(plane, &exynos_state->base);
                plane->state->zpos = exynos_plane->config->zpos;
        }
 }
@@ -300,6 +298,10 @@ int exynos_plane_init(struct drm_device *dev,
                      const struct exynos_drm_plane_config *config)
 {
        int err;
+       unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                      BIT(DRM_MODE_BLEND_PREMULTI) |
+                                      BIT(DRM_MODE_BLEND_COVERAGE);
+       struct drm_plane *plane = &exynos_plane->base;
 
        err = drm_universal_plane_init(dev, &exynos_plane->base,
                                       1 << dev->mode_config.num_crtc,
@@ -320,5 +322,11 @@ int exynos_plane_init(struct drm_device *dev,
        exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos,
                           !(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
 
+       if (config->capabilities & EXYNOS_DRM_PLANE_CAP_PIX_BLEND)
+               drm_plane_create_blend_mode_property(plane, supported_modes);
+
+       if (config->capabilities & EXYNOS_DRM_PLANE_CAP_WIN_BLEND)
+               drm_plane_create_alpha_property(plane);
+
        return 0;
 }
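
The reset path now defers to __drm_atomic_helper_plane_reset(), which matters for the blend properties created above: the core helper initializes the new alpha and pixel_blend_mode state to sensible defaults rather than leaving them zeroed. Roughly what it does, assuming the helper as introduced in this cycle (a sketch, not the verbatim source):

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_blend.h>

    /* Sketch of __drm_atomic_helper_plane_reset(); not the verbatim source. */
    static void sketch_plane_reset(struct drm_plane *plane,
                                   struct drm_plane_state *state)
    {
            state->plane = plane;
            state->rotation = DRM_MODE_ROTATE_0;

            if (plane->alpha_property)              /* fully opaque by default */
                    state->alpha = DRM_BLEND_ALPHA_OPAQUE;
            state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;

            plane->state = state;
    }
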
index 0ddb6eec7b113ea306fea4bde563e8ecb9945495..cd66774e817d37a9d49aa61c40733192164531e5 100644 (file)
@@ -49,56 +49,46 @@ struct scaler_context {
        const struct scaler_data        *scaler_data;
 };
 
-static u32 scaler_get_format(u32 drm_fmt)
+struct scaler_format {
+       u32     drm_fmt;
+       u32     internal_fmt;
+       u32     chroma_tile_w;
+       u32     chroma_tile_h;
+};
+
+static const struct scaler_format scaler_formats[] = {
+       { DRM_FORMAT_NV12, SCALER_YUV420_2P_UV, 8, 8 },
+       { DRM_FORMAT_NV21, SCALER_YUV420_2P_VU, 8, 8 },
+       { DRM_FORMAT_YUV420, SCALER_YUV420_3P, 8, 8 },
+       { DRM_FORMAT_YUYV, SCALER_YUV422_1P_YUYV, 16, 16 },
+       { DRM_FORMAT_UYVY, SCALER_YUV422_1P_UYVY, 16, 16 },
+       { DRM_FORMAT_YVYU, SCALER_YUV422_1P_YVYU, 16, 16 },
+       { DRM_FORMAT_NV16, SCALER_YUV422_2P_UV, 8, 16 },
+       { DRM_FORMAT_NV61, SCALER_YUV422_2P_VU, 8, 16 },
+       { DRM_FORMAT_YUV422, SCALER_YUV422_3P, 8, 16 },
+       { DRM_FORMAT_NV24, SCALER_YUV444_2P_UV, 16, 16 },
+       { DRM_FORMAT_NV42, SCALER_YUV444_2P_VU, 16, 16 },
+       { DRM_FORMAT_YUV444, SCALER_YUV444_3P, 16, 16 },
+       { DRM_FORMAT_RGB565, SCALER_RGB_565, 0, 0 },
+       { DRM_FORMAT_XRGB1555, SCALER_ARGB1555, 0, 0 },
+       { DRM_FORMAT_ARGB1555, SCALER_ARGB1555, 0, 0 },
+       { DRM_FORMAT_XRGB4444, SCALER_ARGB4444, 0, 0 },
+       { DRM_FORMAT_ARGB4444, SCALER_ARGB4444, 0, 0 },
+       { DRM_FORMAT_XRGB8888, SCALER_ARGB8888, 0, 0 },
+       { DRM_FORMAT_ARGB8888, SCALER_ARGB8888, 0, 0 },
+       { DRM_FORMAT_RGBX8888, SCALER_RGBA8888, 0, 0 },
+       { DRM_FORMAT_RGBA8888, SCALER_RGBA8888, 0, 0 },
+};
+
+static const struct scaler_format *scaler_get_format(u32 drm_fmt)
 {
-       switch (drm_fmt) {
-       case DRM_FORMAT_NV12:
-               return SCALER_YUV420_2P_UV;
-       case DRM_FORMAT_NV21:
-               return SCALER_YUV420_2P_VU;
-       case DRM_FORMAT_YUV420:
-               return SCALER_YUV420_3P;
-       case DRM_FORMAT_YUYV:
-               return SCALER_YUV422_1P_YUYV;
-       case DRM_FORMAT_UYVY:
-               return SCALER_YUV422_1P_UYVY;
-       case DRM_FORMAT_YVYU:
-               return SCALER_YUV422_1P_YVYU;
-       case DRM_FORMAT_NV16:
-               return SCALER_YUV422_2P_UV;
-       case DRM_FORMAT_NV61:
-               return SCALER_YUV422_2P_VU;
-       case DRM_FORMAT_YUV422:
-               return SCALER_YUV422_3P;
-       case DRM_FORMAT_NV24:
-               return SCALER_YUV444_2P_UV;
-       case DRM_FORMAT_NV42:
-               return SCALER_YUV444_2P_VU;
-       case DRM_FORMAT_YUV444:
-               return SCALER_YUV444_3P;
-       case DRM_FORMAT_RGB565:
-               return SCALER_RGB_565;
-       case DRM_FORMAT_XRGB1555:
-               return SCALER_ARGB1555;
-       case DRM_FORMAT_ARGB1555:
-               return SCALER_ARGB1555;
-       case DRM_FORMAT_XRGB4444:
-               return SCALER_ARGB4444;
-       case DRM_FORMAT_ARGB4444:
-               return SCALER_ARGB4444;
-       case DRM_FORMAT_XRGB8888:
-               return SCALER_ARGB8888;
-       case DRM_FORMAT_ARGB8888:
-               return SCALER_ARGB8888;
-       case DRM_FORMAT_RGBX8888:
-               return SCALER_RGBA8888;
-       case DRM_FORMAT_RGBA8888:
-               return SCALER_RGBA8888;
-       default:
-               break;
-       }
+       int i;
 
-       return 0;
+       for (i = 0; i < ARRAY_SIZE(scaler_formats); i++)
+               if (scaler_formats[i].drm_fmt == drm_fmt)
+                       return &scaler_formats[i];
+
+       return NULL;
 }
 
 static inline int scaler_reset(struct scaler_context *scaler)
@@ -152,11 +142,11 @@ static inline void scaler_enable_int(struct scaler_context *scaler)
 }
 
 static inline void scaler_set_src_fmt(struct scaler_context *scaler,
-       u32 src_fmt)
+       u32 src_fmt, u32 tile)
 {
        u32 val;
 
-       val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt);
+       val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt) | (tile << 10);
        scaler_write(val, SCALER_SRC_CFG);
 }
 
@@ -188,15 +178,20 @@ static inline void scaler_set_src_span(struct scaler_context *scaler,
        scaler_write(val, SCALER_SRC_SPAN);
 }
 
-static inline void scaler_set_src_luma_pos(struct scaler_context *scaler,
-       struct drm_exynos_ipp_task_rect *src_pos)
+static inline void scaler_set_src_luma_chroma_pos(struct scaler_context *scaler,
+                       struct drm_exynos_ipp_task_rect *src_pos,
+                       const struct scaler_format *fmt)
 {
        u32 val;
 
        val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
        val |=  SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
        scaler_write(val, SCALER_SRC_Y_POS);
-       scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! */
+       val = SCALER_SRC_C_POS_SET_CH_POS(
+               (src_pos->x * fmt->chroma_tile_w / 16) << 2);
+       val |=  SCALER_SRC_C_POS_SET_CV_POS(
+               (src_pos->y * fmt->chroma_tile_h / 16) << 2);
+       scaler_write(val, SCALER_SRC_C_POS);
 }
 
 static inline void scaler_set_src_wh(struct scaler_context *scaler,
@@ -366,11 +361,12 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
        struct scaler_context *scaler =
                        container_of(ipp, struct scaler_context, ipp);
 
-       u32 src_fmt = scaler_get_format(task->src.buf.fourcc);
        struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
-
-       u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
        struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
+       const struct scaler_format *src_fmt, *dst_fmt;
+
+       src_fmt = scaler_get_format(task->src.buf.fourcc);
+       dst_fmt = scaler_get_format(task->dst.buf.fourcc);
 
        pm_runtime_get_sync(scaler->dev);
        if (scaler_reset(scaler)) {
@@ -380,13 +376,14 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
 
        scaler->task = task;
 
-       scaler_set_src_fmt(scaler, src_fmt);
+       scaler_set_src_fmt(
+               scaler, src_fmt->internal_fmt, task->src.buf.modifier != 0);
        scaler_set_src_base(scaler, &task->src);
        scaler_set_src_span(scaler, &task->src);
-       scaler_set_src_luma_pos(scaler, src_pos);
+       scaler_set_src_luma_chroma_pos(scaler, src_pos, src_fmt);
        scaler_set_src_wh(scaler, src_pos);
 
-       scaler_set_dst_fmt(scaler, dst_fmt);
+       scaler_set_dst_fmt(scaler, dst_fmt->internal_fmt);
        scaler_set_dst_base(scaler, &task->dst);
        scaler_set_dst_span(scaler, &task->dst);
        scaler_set_dst_luma_pos(scaler, dst_pos);
@@ -617,6 +614,16 @@ static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
                          .v = { 65536 * 1 / 4, 65536 * 16 }) },
 };
 
+static const struct drm_exynos_ipp_limit scaler_5420_tile_limits[] = {
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K })},
+       { IPP_SIZE_LIMIT(AREA, .h.align = 16, .v.align = 16) },
+       { IPP_SCALE_LIMIT(.h = {1, 1}, .v = {1, 1})},
+       { }
+};
+
+#define IPP_SRCDST_TILE_FORMAT(f, l)   \
+       IPP_SRCDST_MFORMAT(f, DRM_FORMAT_MOD_SAMSUNG_16_16_TILE, (l))
+
 static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
        /* SCALER_YUV420_2P_UV */
        { IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },
@@ -680,6 +687,18 @@ static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
 
        /* SCALER_RGBA8888 */
        { IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
+
+       /* SCALER_YUV420_2P_UV TILE */
+       { IPP_SRCDST_TILE_FORMAT(NV21, scaler_5420_tile_limits) },
+
+       /* SCALER_YUV420_2P_VU TILE */
+       { IPP_SRCDST_TILE_FORMAT(NV12, scaler_5420_tile_limits) },
+
+       /* SCALER_YUV420_3P TILE */
+       { IPP_SRCDST_TILE_FORMAT(YUV420, scaler_5420_tile_limits) },
+
+       /* SCALER_YUV422_1P_YUYV TILE */
+       { IPP_SRCDST_TILE_FORMAT(YUYV, scaler_5420_tile_limits) },
 };
 
 static const struct scaler_data exynos5420_data = {
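
The switch statement collapses into a table keyed by fourcc, and scaler_get_format() now hands back the whole descriptor — internal format plus chroma tile geometry — or NULL for fourccs missing from the table. scaler_commit() relies on the IPP core having validated the fourcc against the registered format list; a defensive caller would look like this hypothetical sketch:

    #include <linux/bug.h>
    #include <linux/errno.h>

    /* Hypothetical caller; a NULL return would indicate a driver bug, since
     * the IPP core rejects unregistered fourccs before a task is committed. */
    static int sketch_program_src(struct scaler_context *scaler,
                                  u32 fourcc, bool tiled)
    {
            const struct scaler_format *fmt = scaler_get_format(fourcc);

            if (WARN_ON(!fmt))
                    return -EINVAL;

            scaler_set_src_fmt(scaler, fmt->internal_fmt, tiled);
            return 0;
    }
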
index ffbf4a950f696d13476b0bf641b9586ce8cc44d7..e3a4ecbc503b937e4d93a24450e9132024381db1 100644 (file)
@@ -131,14 +131,18 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
                .pixel_formats = mixer_formats,
                .num_pixel_formats = ARRAY_SIZE(mixer_formats),
                .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
-                               EXYNOS_DRM_PLANE_CAP_ZPOS,
+                               EXYNOS_DRM_PLANE_CAP_ZPOS |
+                               EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
+                               EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
        }, {
                .zpos = 1,
                .type = DRM_PLANE_TYPE_CURSOR,
                .pixel_formats = mixer_formats,
                .num_pixel_formats = ARRAY_SIZE(mixer_formats),
                .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
-                               EXYNOS_DRM_PLANE_CAP_ZPOS,
+                               EXYNOS_DRM_PLANE_CAP_ZPOS |
+                               EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
+                               EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
        }, {
                .zpos = 2,
                .type = DRM_PLANE_TYPE_OVERLAY,
@@ -146,7 +150,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
                .num_pixel_formats = ARRAY_SIZE(vp_formats),
                .capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
                                EXYNOS_DRM_PLANE_CAP_ZPOS |
-                               EXYNOS_DRM_PLANE_CAP_TILE,
+                               EXYNOS_DRM_PLANE_CAP_TILE |
+                               EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
        },
 };
 
@@ -309,31 +314,42 @@ static void vp_default_filter(struct mixer_context *ctx)
 }
 
 static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
-                               bool alpha)
+                               unsigned int pixel_alpha, unsigned int alpha)
 {
+       u32 win_alpha = alpha >> 8;
        u32 val;
 
        val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
-       if (alpha) {
-               /* blending based on pixel alpha */
+       switch (pixel_alpha) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               break;
+       case DRM_MODE_BLEND_COVERAGE:
+               val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+               break;
+       case DRM_MODE_BLEND_PREMULTI:
+       default:
                val |= MXR_GRP_CFG_BLEND_PRE_MUL;
                val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+               break;
+       }
+
+       if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+               val |= MXR_GRP_CFG_WIN_BLEND_EN;
+               val |= win_alpha;
        }
        mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
                            val, MXR_GRP_CFG_MISC_MASK);
 }
 
-static void mixer_cfg_vp_blend(struct mixer_context *ctx)
+static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
 {
-       u32 val;
+       u32 win_alpha = alpha >> 8;
+       u32 val = 0;
 
-       /*
-        * No blending at the moment since the NV12/NV21 pixelformats don't
-        * have an alpha channel. However the mixer supports a global alpha
-        * value for a layer. Once this functionality is exposed, we can
-        * support blending of the video layer through this.
-        */
-       val = 0;
+       if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+               val |= MXR_VID_CFG_BLEND_EN;
+               val |= win_alpha;
+       }
        mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
 }
 
@@ -529,7 +545,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
        vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]);
 
        mixer_cfg_layer(ctx, plane->index, priority, true);
-       mixer_cfg_vp_blend(ctx);
+       mixer_cfg_vp_blend(ctx, state->base.alpha);
 
        spin_unlock_irqrestore(&ctx->reg_slock, flags);
 
@@ -553,10 +569,16 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
        unsigned int win = plane->index;
        unsigned int x_ratio = 0, y_ratio = 0;
        unsigned int dst_x_offset, dst_y_offset;
+       unsigned int pixel_alpha;
        dma_addr_t dma_addr;
        unsigned int fmt;
        u32 val;
 
+       if (fb->format->has_alpha)
+               pixel_alpha = state->base.pixel_blend_mode;
+       else
+               pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
        switch (fb->format->format) {
        case DRM_FORMAT_XRGB4444:
        case DRM_FORMAT_ARGB4444:
@@ -616,7 +638,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
        mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
 
        mixer_cfg_layer(ctx, win, priority, true);
-       mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha);
+       mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
 
        /* layer update mandatory for mixer 16.0.33.0 */
        if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
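
The KMS plane alpha property is 16 bits wide (DRM_BLEND_ALPHA_OPAQUE is 0xffff) while the mixer's per-window alpha fields are 8 bits, which is what the "alpha >> 8" in both blend helpers implements: 0xffff maps to 0xff, 0x8000 to roughly 50%. Window blending is only switched on when the plane is not fully opaque. A small sketch of the graphics-window case (the MXR_* bits are the driver-local regs-mixer.h definitions):

    #include <drm/drm_blend.h>

    /* 16-bit plane alpha -> 8-bit mixer field, as done above via alpha >> 8. */
    static u32 sketch_win_alpha_bits(u16 plane_alpha)
    {
            u32 val = 0;

            if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)      /* 0xffff */
                    val = MXR_GRP_CFG_WIN_BLEND_EN | (plane_alpha >> 8);

            return val;
    }
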
index 189cfa2470a850a65db8b619d27c8ab58d4e27b1..d2b8194a07bfb93c71aa21d4df6a154178a1a8bb 100644 (file)
 #define MXR_CFG_SCAN_HD                        (1 << 0)
 #define MXR_CFG_SCAN_MASK              0x47
 
+/* bits for MXR_VIDEO_CFG */
+#define MXR_VID_CFG_BLEND_EN           (1 << 16)
+
 /* bits for MXR_GRAPHICn_CFG */
 #define MXR_GRP_CFG_COLOR_KEY_DISABLE  (1 << 21)
 #define MXR_GRP_CFG_BLEND_PRE_MUL      (1 << 20)
 #define MXR_GRP_CFG_WIN_BLEND_EN       (1 << 17)
 #define MXR_GRP_CFG_PIXEL_BLEND_EN     (1 << 16)
-#define MXR_GRP_CFG_MISC_MASK          ((3 << 16) | (3 << 20))
+#define MXR_GRP_CFG_MISC_MASK          ((3 << 16) | (3 << 20) | 0xff)
 #define MXR_GRP_CFG_FORMAT_VAL(x)      MXR_MASK_VAL(x, 11, 8)
 #define MXR_GRP_CFG_FORMAT_MASK                MXR_GRP_CFG_FORMAT_VAL(~0)
 #define MXR_GRP_CFG_ALPHA_VAL(x)       MXR_MASK_VAL(x, 7, 0)
index e61a9592a65081c3bc06fe456ac2157a2a28a492..ba82d916719cd1d996e7aeba45026f008acce25d 100644 (file)
@@ -1140,18 +1140,6 @@ static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
        REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
 }
 
-static bool format_is_yuv(u32 fourcc)
-{
-       switch (fourcc) {
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_NV12:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
                                           enum omap_plane_id plane,
                                           enum omap_dss_rotation_type rotation)
@@ -1910,11 +1898,14 @@ static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
        int scale_x = out_width != orig_width;
        int scale_y = out_height != orig_height;
        bool chroma_upscale = plane != OMAP_DSS_WB;
+       const struct drm_format_info *info;
+
+       info = drm_format_info(fourcc);
 
        if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
                return;
 
-       if (!format_is_yuv(fourcc)) {
+       if (!info->is_yuv) {
                /* reset chroma resampling for RGB formats  */
                if (plane != OMAP_DSS_WB)
                        REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
@@ -2624,7 +2615,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
        unsigned int offset0, offset1;
        s32 row_inc;
        s32 pix_inc;
-       u16 frame_width, frame_height;
+       u16 frame_width;
        unsigned int field_offset = 0;
        u16 in_height = height;
        u16 in_width = width;
@@ -2632,6 +2623,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
        bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
        unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
        unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
+       const struct drm_format_info *info;
+
+       info = drm_format_info(fourcc);
 
        /* when setting up WB, dispc_plane_pclk_rate() returns 0 */
        if (plane == OMAP_DSS_WB)
@@ -2640,7 +2634,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
        if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
                return -EINVAL;
 
-       if (format_is_yuv(fourcc) && (in_width & 1)) {
+       if (info->is_yuv && (in_width & 1)) {
                DSSERR("input width %d is not even for YUV format\n", in_width);
                return -EINVAL;
        }
@@ -2680,7 +2674,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
                DSSDBG("predecimation %d x %x, new input size %d x %d\n",
                        x_predecim, y_predecim, in_width, in_height);
 
-       if (format_is_yuv(fourcc) && (in_width & 1)) {
+       if (info->is_yuv && (in_width & 1)) {
                DSSDBG("predecimated input width is not even for YUV format\n");
                DSSDBG("adjusting input width %d -> %d\n",
                        in_width, in_width & ~1);
@@ -2688,7 +2682,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
                in_width &= ~1;
        }
 
-       if (format_is_yuv(fourcc))
+       if (info->is_yuv)
                cconv = 1;
 
        if (ilace && !fieldmode) {
@@ -2714,13 +2708,10 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
        row_inc = 0;
        pix_inc = 0;
 
-       if (plane == OMAP_DSS_WB) {
+       if (plane == OMAP_DSS_WB)
                frame_width = out_width;
-               frame_height = out_height;
-       } else {
+       else
                frame_width = in_width;
-               frame_height = height;
-       }
 
        calc_offset(screen_width, frame_width,
                        fourcc, fieldmode, field_offset,
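
The driver-local format_is_yuv() list is replaced by the core fourcc database: drm_format_info() returns a format descriptor (or NULL, with a WARN, for fourccs the core does not know), and its is_yuv flag covers every YUV format rather than the three the removed helper listed. The equivalent standalone check, as a sketch:

    #include <drm/drm_fourcc.h>

    /* Core-database equivalent of the removed format_is_yuv(). */
    static bool sketch_is_yuv(u32 fourcc)
    {
            const struct drm_format_info *info = drm_format_info(fourcc);

            return info && info->is_yuv;
    }
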
index 19fc4dfc429ee17c4197cf272e4dafb5a1f3552b..1aaf260aa9b8638d2e7fac7aaa36ed3fe14a0880 100644 (file)
@@ -947,7 +947,7 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
                                &dss_debug_fops);
        if (IS_ERR(d)) {
                kfree(entry);
-               return ERR_PTR(PTR_ERR(d));
+               return ERR_CAST(d);
        }
 
        entry->dentry = d;
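
ERR_CAST() is the idiomatic spelling for carrying an error pointer across pointer types; the replaced ERR_PTR(PTR_ERR(d)) round-trips the same value through a long. A minimal illustration:

    #include <linux/err.h>

    struct dentry;

    /* ERR_CAST() keeps the error value while changing the pointer type. */
    static void *sketch_err_cast(struct dentry *d)
    {
            if (IS_ERR(d))
                    return ERR_CAST(d);     /* same bits as ERR_PTR(PTR_ERR(d)) */
            return d;
    }
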
index 98f5ca29444ace053d27015f158b9b71db903c89..b81302c4bf9e6d49a678ff8e34829b8c0bc7dbc3 100644 (file)
@@ -164,10 +164,11 @@ static void omap_connector_destroy(struct drm_connector *connector)
 
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
-       kfree(omap_connector);
 
        omapdss_device_put(omap_connector->output);
        omapdss_device_put(omap_connector->display);
+
+       kfree(omap_connector);
 }
 
 #define MAX_EDID  512
index c2785cc98dc91d1cee3f988d3df5abd0bc90f285..60bb3f9297bcb74359a9cd41df8e158041910af1 100644 (file)
@@ -159,6 +159,7 @@ struct dmm_platform_data {
 
 struct dmm {
        struct device *dev;
+       dma_addr_t phys_base;
        void __iomem *base;
        int irq;
 
@@ -189,6 +190,12 @@ struct dmm {
        struct list_head alloc_head;
 
        const struct dmm_platform_data *plat_data;
+
+       bool dmm_workaround;
+       spinlock_t wa_lock;
+       u32 *wa_dma_data;
+       dma_addr_t wa_dma_handle;
+       struct dma_chan *wa_dma_chan;
 };
 
 #endif
index f92fe205550bc9c65df236e97be485fbb428ee87..252f5ebb1acc4830caf833e785df6a94f9cc7e3c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -79,14 +80,138 @@ static const u32 reg[][4] = {
                        DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
 };
 
+static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
+{
+       struct dma_device *dma_dev = dmm->wa_dma_chan->device;
+       struct dma_async_tx_descriptor *tx;
+       enum dma_status status;
+       dma_cookie_t cookie;
+
+       tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+       if (!tx) {
+               dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
+               return -EIO;
+       }
+
+       cookie = tx->tx_submit(tx);
+       if (dma_submit_error(cookie)) {
+               dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
+               return -EIO;
+       }
+
+       dma_async_issue_pending(dmm->wa_dma_chan);
+       status = dma_sync_wait(dmm->wa_dma_chan, cookie);
+       if (status != DMA_COMPLETE)
+               dev_err(dmm->dev, "i878 wa DMA copy failure\n");
+
+       dmaengine_terminate_all(dmm->wa_dma_chan);
+       return 0;
+}
+
+static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
+{
+       dma_addr_t src, dst;
+       int r;
+
+       src = dmm->phys_base + reg;
+       dst = dmm->wa_dma_handle;
+
+       r = dmm_dma_copy(dmm, src, dst);
+       if (r) {
+               dev_err(dmm->dev, "sDMA read transfer timeout\n");
+               return readl(dmm->base + reg);
+       }
+
+       /*
+        * As per i878 workaround, the DMA is used to access the DMM registers.
+        * Make sure that the readl is not moved by the compiler or the CPU
+        * before the DMA has finished writing the value to memory.
+        */
+       rmb();
+       return readl(dmm->wa_dma_data);
+}
+
+static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
+{
+       dma_addr_t src, dst;
+       int r;
+
+       writel(val, dmm->wa_dma_data);
+       /*
+        * As per i878 workaround, the DMA is used to access the DMM registers.
+        * Make sure that the writel is not moved by the compiler or the CPU, so
+        * the data will be in place before we start the DMA to do the actual
+        * register write.
+        */
+       wmb();
+
+       src = dmm->wa_dma_handle;
+       dst = dmm->phys_base + reg;
+
+       r = dmm_dma_copy(dmm, src, dst);
+       if (r) {
+               dev_err(dmm->dev, "sDMA write transfer timeout\n");
+               writel(val, dmm->base + reg);
+       }
+}
+
 static u32 dmm_read(struct dmm *dmm, u32 reg)
 {
-       return readl(dmm->base + reg);
+       if (dmm->dmm_workaround) {
+               u32 v;
+               unsigned long flags;
+
+               spin_lock_irqsave(&dmm->wa_lock, flags);
+               v = dmm_read_wa(dmm, reg);
+               spin_unlock_irqrestore(&dmm->wa_lock, flags);
+
+               return v;
+       } else {
+               return readl(dmm->base + reg);
+       }
 }
 
 static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
 {
-       writel(val, dmm->base + reg);
+       if (dmm->dmm_workaround) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dmm->wa_lock, flags);
+               dmm_write_wa(dmm, val, reg);
+               spin_unlock_irqrestore(&dmm->wa_lock, flags);
+       } else {
+               writel(val, dmm->base + reg);
+       }
+}
+
+static int dmm_workaround_init(struct dmm *dmm)
+{
+       dma_cap_mask_t mask;
+
+       spin_lock_init(&dmm->wa_lock);
+
+       dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
+                                             &dmm->wa_dma_handle, GFP_KERNEL);
+       if (!dmm->wa_dma_data)
+               return -ENOMEM;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+
+       dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
+       if (!dmm->wa_dma_chan) {
+               dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static void dmm_workaround_uninit(struct dmm *dmm)
+{
+       dma_release_channel(dmm->wa_dma_chan);
+
+       dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
 }
 
 /* simple allocator to grab next 16 byte aligned memory from txn */
@@ -285,6 +410,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
        }
 
        txn->last_pat->next_pa = 0;
+       /* ensure that the written descriptors are visible to DMM */
+       wmb();
+
+       /*
+        * NOTE: the wmb() above should be enough, but there seems to be a bug
+        * in OMAP's memory barrier implementation, which in some rare cases may
+        * cause the writes not to be observable after wmb().
+        */
+
+       /* read back to ensure the data is in RAM */
+       readl(&txn->last_pat->next_pa);
 
        /* write to PAT_DESCR to clear out any pending transaction */
        dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
@@ -603,6 +739,10 @@ static int omap_dmm_remove(struct platform_device *dev)
        unsigned long flags;
 
        if (omap_dmm) {
+               /* Disable all enabled interrupts */
+               dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_CLR);
+               free_irq(omap_dmm->irq, omap_dmm);
+
                /* free all area regions */
                spin_lock_irqsave(&list_lock, flags);
                list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
@@ -625,8 +765,8 @@ static int omap_dmm_remove(struct platform_device *dev)
                if (omap_dmm->dummy_page)
                        __free_page(omap_dmm->dummy_page);
 
-               if (omap_dmm->irq > 0)
-                       free_irq(omap_dmm->irq, omap_dmm);
+               if (omap_dmm->dmm_workaround)
+                       dmm_workaround_uninit(omap_dmm);
 
                iounmap(omap_dmm->base);
                kfree(omap_dmm);
@@ -673,6 +813,7 @@ static int omap_dmm_probe(struct platform_device *dev)
                goto fail;
        }
 
+       omap_dmm->phys_base = mem->start;
        omap_dmm->base = ioremap(mem->start, SZ_2K);
 
        if (!omap_dmm->base) {
@@ -688,6 +829,22 @@ static int omap_dmm_probe(struct platform_device *dev)
 
        omap_dmm->dev = &dev->dev;
 
+       if (of_machine_is_compatible("ti,dra7")) {
+               /*
+                * DRA7 Errata i878 says that MPU should not be used to access
+                * RAM and DMM at the same time. As it's not possible to prevent
+                * MPU accessing RAM, we need to access DMM via a proxy.
+                */
+               if (!dmm_workaround_init(omap_dmm)) {
+                       omap_dmm->dmm_workaround = true;
+                       dev_info(&dev->dev,
+                               "workaround for errata i878 in use\n");
+               } else {
+                       dev_warn(&dev->dev,
+                                "failed to initialize work-around for i878\n");
+               }
+       }
+
        hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
        omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
        omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -714,24 +871,6 @@ static int omap_dmm_probe(struct platform_device *dev)
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
 
-       ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
-                               "omap_dmm_irq_handler", omap_dmm);
-
-       if (ret) {
-               dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
-                       omap_dmm->irq, ret);
-               omap_dmm->irq = -1;
-               goto fail;
-       }
-
-       /* Enable all interrupts for each refill engine except
-        * ERR_LUT_MISS<n> (which is just advisory, and we don't care
-        * about because we want to be able to refill live scanout
-        * buffers for accelerated pan/scroll) and FILL_DSC<n> which
-        * we just generally don't care about.
-        */
-       dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
-
        omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!omap_dmm->dummy_page) {
                dev_err(&dev->dev, "could not allocate dummy page\n");
@@ -823,6 +962,24 @@ static int omap_dmm_probe(struct platform_device *dev)
                .p1.y = omap_dmm->container_height - 1,
        };
 
+       ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+                               "omap_dmm_irq_handler", omap_dmm);
+
+       if (ret) {
+               dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+                       omap_dmm->irq, ret);
+               omap_dmm->irq = -1;
+               goto fail;
+       }
+
+       /* Enable all interrupts for each refill engine except
+        * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+        * about because we want to be able to refill live scanout
+        * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+        * we just generally don't care about.
+        */
+       dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
+
        /* initialize all LUTs to dummy page entries */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                area.tcm = omap_dmm->tcm[i];
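
The i878 proxy added above is a stock dmaengine memcpy client: acquire any DMA_MEMCPY-capable channel at init, then run one-word synchronous copies between the coherent bounce buffer and the DMM register's physical address. dma_sync_wait() busy-polls the cookie (a cpu_relax() loop with a timeout) rather than sleeping, which is why dmm_read()/dmm_write() remain callable under the wa_lock spinlock. The channel-acquisition recipe in isolation:

    #include <linux/dmaengine.h>

    /* Acquire any channel capable of plain memory-to-memory copies;
     * NULL filter function and data mean "take the first capable one". */
    static struct dma_chan *sketch_get_memcpy_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            return dma_request_channel(mask, NULL, NULL);
    }
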
index 5f98506ac2c5d976899d9704d322842ee583289a..5e67d58cbc281c1d8e2bd5b8ddd66f82073bdb7b 100644 (file)
@@ -439,7 +439,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        args->size = omap_gem_mmap_size(obj);
        args->offset = omap_gem_mmap_offset(obj);
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return ret;
 }
@@ -614,7 +614,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
        omap_disconnect_pipelines(ddev);
 err_crtc_uninit:
        omap_crtc_pre_uninit(priv);
-       drm_dev_unref(ddev);
+       drm_dev_put(ddev);
        return ret;
 }
 
@@ -643,7 +643,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
        omap_disconnect_pipelines(ddev);
        omap_crtc_pre_uninit(priv);
 
-       drm_dev_unref(ddev);
+       drm_dev_put(ddev);
 }
 
 static int pdev_probe(struct platform_device *pdev)
index 9f1e3d8f8488c823cc9914e49559adae88613017..4d264fd554d8f7d80ba63011b9250b562db90253 100644 (file)
@@ -319,7 +319,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
 
 error:
        while (--i >= 0)
-               drm_gem_object_unreference_unlocked(bos[i]);
+               drm_gem_object_put_unlocked(bos[i]);
 
        return fb;
 }
index b445309b014313a77d84efd179743dadaa2721ac..aee99194499f1675be7e160b96241f1f62dbbcd1 100644 (file)
@@ -150,7 +150,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
                /* note: if fb creation failed, we can't rely on fb destroy
                 * to unref the bo:
                 */
-               drm_gem_object_unreference_unlocked(fbdev->bo);
+               drm_gem_object_put_unlocked(fbdev->bo);
                ret = PTR_ERR(fb);
                goto fail;
        }
index 4ba5d035c5909eed0f05b978324186e7ba1b46c0..8dcaf9f4aa75b9467105f12b33f50ba7b50980c3 100644 (file)
@@ -638,7 +638,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 
        *offset = omap_gem_mmap_offset(obj);
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
 fail:
        return ret;
@@ -1312,7 +1312,7 @@ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        }
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
index ec04a69ade46bde5e4175740ecd6dccaca61ee6f..0f8b597ccd106e9c96d0514a87d2099e50ce05b5 100644 (file)
@@ -168,7 +168,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                         * Importing dmabuf exported from our own gem increases
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
-                       drm_gem_object_reference(obj);
+                       drm_gem_object_get(obj);
                        return obj;
                }
        }
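
The omapdrm hunks from omap_drv.c down to here are a mechanical rename sweep: the deprecated reference/unreference refcounting helpers become their kref-style get/put equivalents, with no change in locking or lifetime semantics. For reference:

    #include <drm/drm_drv.h>
    #include <drm/drm_gem.h>

    /* One-to-one renames, semantics unchanged:
     *   drm_gem_object_reference(obj)            -> drm_gem_object_get(obj)
     *   drm_gem_object_unreference_unlocked(obj) -> drm_gem_object_put_unlocked(obj)
     *   drm_dev_unref(ddev)                      -> drm_dev_put(ddev)
     */
    static void sketch_drop_ref(struct drm_gem_object *obj)
    {
            drm_gem_object_put_unlocked(obj);
    }
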
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
deleted file mode 100644 (file)
index 460e63d..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * SImple Tiler Allocator (SiTA) private structures.
- *
- * Copyright (C) 2009-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ravi Ramachandra <r.ramachandra@ti.com>
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- *
- * * Neither the name of Texas Instruments Incorporated nor the names of
- *   its contributors may be used to endorse or promote products derived
- *   from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TCM_SITA_H
-#define _TCM_SITA_H
-
-#include "tcm.h"
-
-/* length between two coordinates */
-#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
-
-enum criteria {
-       CR_MAX_NEIGHS           = 0x01,
-       CR_FIRST_FOUND          = 0x10,
-       CR_BIAS_HORIZONTAL      = 0x20,
-       CR_BIAS_VERTICAL        = 0x40,
-       CR_DIAGONAL_BALANCE     = 0x80
-};
-
-/* nearness to the beginning of the search field from 0 to 1000 */
-struct nearness_factor {
-       s32 x;
-       s32 y;
-};
-
-/*
- * Statistics on immediately neighboring slots.  Edge is the number of
- * border segments that are also border segments of the scan field.  Busy
- * refers to the number of neighbors that are occupied.
- */
-struct neighbor_stats {
-       u16 edge;
-       u16 busy;
-};
-
-/* structure to keep the score of a potential allocation */
-struct score {
-       struct nearness_factor  f;
-       struct neighbor_stats   n;
-       struct tcm_area         a;
-       u16    neighs;          /* number of busy neighbors */
-};
-
-struct sita_pvt {
-       spinlock_t lock;        /* spinlock to protect access */
-       struct tcm_pt div_pt;   /* divider point splitting container */
-       struct tcm_area ***map; /* pointers to the parent area for each slot */
-};
-
-/* assign coordinates to area */
-static inline
-void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
-{
-       a->p0.x = x0;
-       a->p0.y = y0;
-       a->p1.x = x1;
-       a->p1.y = y1;
-}
-
-#endif
index 0fb300d41a09c02508e70e89678695e09a8ff0f9..33e5332684882c51c5567dc375494aa42980973a 100644 (file)
@@ -554,29 +554,23 @@ static struct drm_driver tilcdc_driver = {
 static int tilcdc_pm_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
-       struct tilcdc_drm_private *priv = ddev->dev_private;
+       int ret = 0;
 
-       priv->saved_state = drm_atomic_helper_suspend(ddev);
+       ret = drm_mode_config_helper_suspend(ddev);
 
        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);
 
-       return 0;
+       return ret;
 }
 
 static int tilcdc_pm_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
-       struct tilcdc_drm_private *priv = ddev->dev_private;
-       int ret = 0;
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);
-
-       if (priv->saved_state)
-               ret = drm_atomic_helper_resume(ddev, priv->saved_state);
-
-       return ret;
+       return drm_mode_config_helper_resume(ddev);
 }
 #endif
 
index ead5122166699be40787974f49f5c13a03491a90..62cea5ff5558b546b0b0adf413ea9a064cfe48a1 100644 (file)
@@ -70,9 +70,6 @@ struct tilcdc_drm_private {
        const uint32_t *pixelformats;
        uint32_t num_pixelformats;
 
-       /* The context for pm susped/resume cycle is stored here */
-       struct drm_atomic_state *saved_state;
-
 #ifdef CONFIG_CPU_FREQ
        struct notifier_block freq_transition;
 #endif
index a60e560804e0ad3c64c6cf2b632a8c5085b59fde..01fc670ce7a21978881c2603034c84142d943bea 100644 (file)
@@ -4,8 +4,8 @@
 
 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-       ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
-       ttm_bo_manager.o ttm_page_alloc_dma.o
+       ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
+       ttm_page_alloc_dma.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
index 6fe91c1b692d6fd547fcfff08b1633c229e77b0a..a1d977fbade5502c8edad616e537702d75dc1673 100644 (file)
@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object, vma_node);
-               if (!kref_get_unless_zero(&bo->kref))
-                       bo = NULL;
+               bo = ttm_bo_get_unless_zero(bo);
        }
 
        drm_vma_offset_unlock_lookup(&bdev->vma_manager);
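
ttm_bo_vm_lookup() switches to the dedicated lookup helper instead of open-coding the kref test. The helper, introduced in ttm_bo_api.h alongside this series, is essentially the following sketch:

    #include <drm/ttm/ttm_bo_api.h>

    /* Sketch of ttm_bo_get_unless_zero(): take a reference only while the
     * object is still alive; NULL once the refcount has already hit zero. */
    static inline struct ttm_buffer_object *
    sketch_bo_get_unless_zero(struct ttm_buffer_object *bo)
    {
            if (!kref_get_unless_zero(&bo->kref))
                    return NULL;
            return bo;
    }
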
index 09b2aa08363e6b5ca7c13c25a72b4b73d080cdde..8841bd30e1e5cb48e7f88d6cfcf97d8b3cbe1d1b 100644 (file)
@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
            vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
            vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
-           vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
+           vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
+           vmwgfx_validation.o \
+           ttm_object.o ttm_lock.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
similarity index 94%
rename from drivers/gpu/drm/ttm/ttm_lock.c
rename to drivers/gpu/drm/vmwgfx/ttm_lock.c
index 20694b8a01cac415581c181f9a4f0005d3517586..16b2083cb9d42e2e1f21e4a0c6bc87020a86dfb3 100644 (file)
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <drm/ttm/ttm_lock.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/sched/signal.h>
-#include <linux/module.h>
+#include "ttm_lock.h"
+#include "ttm_object.h"
 
 #define TTM_WRITE_LOCK_PENDING    (1 << 0)
 #define TTM_VT_LOCK_PENDING       (1 << 1)
@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock)
        lock->kill_takers = false;
        lock->signal = SIGKILL;
 }
-EXPORT_SYMBOL(ttm_lock_init);
 
 void ttm_read_unlock(struct ttm_lock *lock)
 {
@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock)
                wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_read_unlock);
 
 static bool __ttm_read_lock(struct ttm_lock *lock)
 {
@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
                wait_event(lock->queue, __ttm_read_lock(lock));
        return ret;
 }
-EXPORT_SYMBOL(ttm_read_lock);
 
 static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
 {
@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock)
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_write_unlock);
 
 static bool __ttm_write_lock(struct ttm_lock *lock)
 {
@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
 
        return ret;
 }
-EXPORT_SYMBOL(ttm_write_lock);
 
 static int __ttm_vt_unlock(struct ttm_lock *lock)
 {
@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock,
 
        return ret;
 }
-EXPORT_SYMBOL(ttm_vt_lock);
 
 int ttm_vt_unlock(struct ttm_lock *lock)
 {
        return ttm_ref_object_base_unref(lock->vt_holder,
-                                        lock->base.hash.key, TTM_REF_USAGE);
+                                        lock->base.handle, TTM_REF_USAGE);
 }
-EXPORT_SYMBOL(ttm_vt_unlock);
 
 void ttm_suspend_unlock(struct ttm_lock *lock)
 {
@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {
@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
        wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
-EXPORT_SYMBOL(ttm_suspend_lock);
similarity index 90%
rename from drivers/gpu/drm/ttm/ttm_object.c
rename to drivers/gpu/drm/vmwgfx/ttm_object.c
index 74f1b1eb1f8ead99fda9955df5c6ade71038fe60..36990b80e7906a0ab52528479f4d295af393d9b7 100644 (file)
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/atomic.h>
+#include "ttm_object.h"
 
 struct ttm_object_file {
        struct ttm_object_device *tdev;
@@ -95,6 +94,7 @@ struct ttm_object_device {
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        size_t dma_buf_size;
+       struct idr idr;
 };
 
 /**
@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
+       idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
-       ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
-                                           &base->hash,
-                                           (unsigned long)base, 31, 0, 0);
+       ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
-       if (unlikely(ret != 0))
-               goto out_err0;
+       idr_preload_end();
+       if (ret < 0)
+               return ret;
 
+       base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;
@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        return 0;
 out_err1:
        spin_lock(&tdev->object_lock);
-       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+       idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
-out_err0:
        return ret;
 }
-EXPORT_SYMBOL(ttm_base_object_init);
 
 static void ttm_release_base(struct kref *kref)
 {
@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
        struct ttm_object_device *tdev = base->tfile->tdev;
 
        spin_lock(&tdev->object_lock);
-       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+       idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
 
        /*
@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
 
        kref_put(&base->refcount, ttm_release_base);
 }
-EXPORT_SYMBOL(ttm_base_object_unref);
+
+/**
+ * ttm_base_object_noref_lookup - look up a base object without reference
+ * @tfile: The struct ttm_object_file the object is registered with.
+ * @key: The object handle.
+ *
+ * This function looks up a ttm base object and returns a pointer to it
+ * without refcounting the pointer. The returned pointer is only valid
+ * until ttm_base_object_noref_release() is called, and the object
+ * pointed to by the returned pointer may be doomed. Any persistent usage
+ * of the object requires a refcount to be taken using kref_get_unless_zero().
+ * If this function returns successfully it must be paired with
+ * ttm_base_object_noref_release(), and no sleeping or scheduling functions
+ * may be called in between these function calls.
+ *
+ * Return: A pointer to the object if successful or NULL otherwise.
+ */
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+{
+       struct drm_hash_item *hash;
+       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+       int ret;
+
+       rcu_read_lock();
+       ret = drm_ht_find_item_rcu(ht, key, &hash);
+       if (ret) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
+       __release(RCU);
+       return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+}
+EXPORT_SYMBOL(ttm_base_object_noref_lookup);
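A short, hypothetical caller showing the pairing the comment above requires; example_get_base() is an invented name, and kref_get_unless_zero() is the documented way to keep the object past the release.

#include "ttm_object.h"

/* Illustrative only: turn a noref lookup into a real reference. */
static struct ttm_base_object *
example_get_base(struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return NULL;

        /* No sleeping or scheduling between lookup and release. */
        if (!kref_get_unless_zero(&base->refcount))
                base = NULL;    /* object was doomed */

        ttm_base_object_noref_release();
        return base;    /* full reference, or NULL */
}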
 
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 
        return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
 
 struct ttm_base_object *
 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 {
-       struct ttm_base_object *base = NULL;
-       struct drm_hash_item *hash;
-       struct drm_open_hash *ht = &tdev->object_hash;
-       int ret;
+       struct ttm_base_object *base;
 
        rcu_read_lock();
-       ret = drm_ht_find_item_rcu(ht, key, &hash);
+       base = idr_find(&tdev->idr, key);
 
-       if (likely(ret == 0)) {
-               base = drm_hash_entry(hash, struct ttm_base_object, hash);
-               if (!kref_get_unless_zero(&base->refcount))
-                       base = NULL;
-       }
+       if (base && !kref_get_unless_zero(&base->refcount))
+               base = NULL;
        rcu_read_unlock();
 
        return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 /**
  * ttm_ref_object_exists - Check whether a caller has a valid ref object
@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
        struct ttm_ref_object *ref;
 
        rcu_read_lock();
-       if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+       if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
                goto out_false;
 
        /*
@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
        rcu_read_unlock();
        return false;
 }
-EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
        while (ret == -EINVAL) {
                rcu_read_lock();
-               ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+               ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
 
                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                        return -ENOMEM;
                }
 
-               ref->hash.key = base->hash.key;
+               ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
        return ret;
 }
-EXPORT_SYMBOL(ttm_ref_object_add);
 
-static void ttm_ref_object_release(struct kref *kref)
+static void __releases(tfile->lock) __acquires(tfile->lock)
+ttm_ref_object_release(struct kref *kref)
 {
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
        spin_unlock(&tfile->lock);
        return 0;
 }
-EXPORT_SYMBOL(ttm_ref_object_base_unref);
 
 void ttm_object_file_release(struct ttm_object_file **p_tfile)
 {
@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 
        ttm_object_file_unref(&tfile);
 }
-EXPORT_SYMBOL(ttm_object_file_release);
 
 struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
@@ -499,7 +521,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 
        return NULL;
 }
-EXPORT_SYMBOL(ttm_object_file_init);
 
 struct ttm_object_device *
 ttm_object_device_init(struct ttm_mem_global *mem_glob,
@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
        if (ret != 0)
                goto out_no_object_hash;
 
+       idr_init(&tdev->idr);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
@@ -530,7 +552,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
        kfree(tdev);
        return NULL;
 }
-EXPORT_SYMBOL(ttm_object_device_init);
 
 void ttm_object_device_release(struct ttm_object_device **p_tdev)
 {
@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        *p_tdev = NULL;
 
+       WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+       idr_destroy(&tdev->idr);
        drm_ht_remove(&tdev->object_hash);
 
        kfree(tdev);
 }
-EXPORT_SYMBOL(ttm_object_device_release);
 
 /**
  * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 
        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
-       *handle = base->hash.key;
+       *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
        dma_buf_put(dma_buf);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
 
 /**
  * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
@@ -739,7 +760,6 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                ttm_base_object_unref(&base);
        return ret;
 }
-EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
 
 /**
  * ttm_prime_object_init - Initialize a ttm_prime_object
@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                                    ttm_prime_refcount_release,
                                    ref_obj_release);
 }
-EXPORT_SYMBOL(ttm_prime_object_init);
similarity index 94%
rename from include/drm/ttm/ttm_object.h
rename to drivers/gpu/drm/vmwgfx/ttm_object.h
index a98bfeb4239edf09be7d253c09a9211fcdc134c6..50d26c7ff42d1b93ca8e5cff4098713880ba2213 100644 (file)
@@ -42,8 +42,7 @@
 #include <linux/kref.h>
 #include <linux/rcupdate.h>
 #include <linux/dma-buf.h>
-
-#include "ttm_memory.h"
+#include <drm/ttm/ttm_memory.h>
 
 /**
  * enum ttm_ref_type
@@ -125,14 +124,14 @@ struct ttm_object_device;
 
 struct ttm_base_object {
        struct rcu_head rhead;
-       struct drm_hash_item hash;
-       enum ttm_object_type object_type;
-       bool shareable;
        struct ttm_object_file *tfile;
        struct kref refcount;
        void (*refcount_release) (struct ttm_base_object **base);
        void (*ref_obj_release) (struct ttm_base_object *base,
                                 enum ttm_ref_type ref_type);
+       u32 handle;
+       enum ttm_object_type object_type;
+       u32 shareable;
 };
 
 
@@ -351,4 +350,26 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 
 #define ttm_prime_object_kfree(__obj, __prime)         \
        kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128 bytes
+ * per idr entry.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
+
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_noref_release - release a base object pointer looked up
+ * without reference
+ *
+ * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+ */
+static inline void ttm_base_object_noref_release(void)
+{
+       __acquire(RCU);
+       rcu_read_unlock();
+}
 #endif
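One consequence of the idr move, sketched under the assumption that callers account memory with ttm_round_pot() as elsewhere in this patch: any user-visible object that gets a handle should now budget TTM_OBJ_EXTRA_SIZE on top of its own struct size (compare vmw_bo_acc_size() below). The helper name is illustrative.

/* Illustrative only: accounting size of a handle-bearing object. */
static inline size_t example_user_obj_acc_size(size_t struct_size)
{
        return ttm_round_pot(struct_size) + TTM_OBJ_EXTRA_SIZE;
}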
index 2dda0334576154ff3c22d56c0a8813dfee375dcd..7ce1c2f87d9a7ccbce4c84596d1206afa8bdef1e 100644 (file)
@@ -30,7 +30,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
-#include "drm/ttm/ttm_object.h"
+#include "ttm_object.h"
 
 
 /**
@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+                 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+                                     TTM_OBJ_EXTRA_SIZE;
        }
 
        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
-       *handle = user_bo->prime.base.hash.key;
+       *handle = user_bo->prime.base.handle;
 
 out_no_base_object:
        return ret;
@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
        return 0;
 }
 
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). If this function returns successfully it
+ * must be paired with vmw_user_bo_noref_release(), and no sleeping or
+ * scheduling functions may be called in between these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+       struct vmw_user_buffer_object *vmw_user_bo;
+       struct ttm_base_object *base;
+
+       base = ttm_base_object_noref_lookup(tfile, handle);
+       if (!base) {
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return ERR_PTR(-ESRCH);
+       }
+
+       if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+               ttm_base_object_noref_release();
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return ERR_PTR(-EINVAL);
+       }
+
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+                                  prime.base);
+       return &vmw_user_bo->vbo;
+}
 
 /**
  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
 
        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
-       *handle = user_bo->prime.base.hash.key;
+       *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
 }
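A hypothetical fast-path caller of vmw_user_bo_noref_lookup(), following the rules spelled out in its comment and assuming the declarations vmwgfx_drv.h gains in this patch; example_check_bo() is invented for illustration.

/* Illustrative only: inspect a BO without taking a reference. */
static int example_check_bo(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_buffer_object *vbo;

        vbo = vmw_user_bo_noref_lookup(tfile, handle);
        if (IS_ERR(vbo))
                return PTR_ERR(vbo);

        /* Use @vbo here; no sleeping until the release below. */

        vmw_user_bo_noref_release();
        return 0;
}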
index e7e4655d3f36bf3680412b7e462cdb138fd13fbd..48d1380a952e2d918f379fc27b6aa83258607a85 100644 (file)
@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 {
        struct vmw_cmdbuf_header *cur = man->cur;
 
-       WARN_ON(!mutex_is_locked(&man->cur_mutex));
+       lockdep_assert_held_once(&man->cur_mutex);
 
        if (!cur)
                return;
@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 {
        struct vmw_cmdbuf_header *cur = man->cur;
 
-       WARN_ON(!mutex_is_locked(&man->cur_mutex));
+       lockdep_assert_held_once(&man->cur_mutex);
 
        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
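The assertion swap above is behavioral, not cosmetic. A small sketch of the difference, with an invented helper name:

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Illustrative only: contrasting the two assertion styles. */
static void example_assert_cur_mutex(struct mutex *cur_mutex)
{
        /* Old style: only proves that *some* task holds the mutex. */
        WARN_ON(!mutex_is_locked(cur_mutex));

        /*
         * New style: with CONFIG_LOCKDEP it proves the *current* task
         * holds the mutex; without lockdep it compiles away entirely.
         */
        lockdep_assert_held_once(cur_mutex);
}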
index 3b75af9bf85f353b3c7c0a6e1041dd119a92f5ce..4ac55fc2bf97021e36a2035dc5b5a830183c996c 100644 (file)
@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
        if (unlikely(ret != 0))
                return ERR_PTR(ret);
 
-       return vmw_resource_reference
-               (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+       return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
 }
 
 /**
index 7c3cb8efd11a894ecdf5c3993761be809bf68e46..14bd760a62fd7fda3053d9d244826984f6c7b5f7 100644 (file)
@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
                }
        }
 
-
-
-       vmw_resource_activate(res, vmw_hw_context_destroy);
+       res->hw_destroy = vmw_hw_context_destroy;
        return 0;
 
 out_cotables:
@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
-       vmw_resource_activate(res, vmw_hw_context_destroy);
+       res->hw_destroy = vmw_hw_context_destroy;
        return 0;
 
 out_early:
@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of contexts anyway.
-        */
-
        if (unlikely(vmw_user_context_size == 0))
-               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
-                 ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+                 ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+                 VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
                goto out_err;
        }
 
-       arg->cid = ctx->base.hash.key;
+       arg->cid = ctx->base.handle;
 out_err:
        vmw_resource_unreference(&res);
 out_unlock:
@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);
 
-       return vmw_resource_reference
-               (container_of(ctx, struct vmw_user_context, res)->
-                cotables[cotable_type]);
+       return container_of(ctx, struct vmw_user_context, res)->
+               cotables[cotable_type];
 }
 
 /**
index 1d45714e1d5a061d1da0d1c9b5bbf7a932a3926d..44f3f6f107d3c632caeac9e94a7b2f0035b22a97 100644 (file)
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
        vcotbl->type = type;
        vcotbl->ctx = ctx;
 
-       vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+       vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
 
        return &vcotbl->res;
 
index bb6dbbe188358f040c1a1e17425874b9b581e367..61a84b958d671671cb56c37b6e44a589989d026b 100644 (file)
@@ -30,9 +30,9 @@
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
 #include "vmwgfx_binding.h"
+#include "ttm_object.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/dma_remapping.h>
 
@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        mutex_init(&dev_priv->binding_mutex);
        mutex_init(&dev_priv->requested_layout_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
-       rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
+       spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
index 1abe21758b0d7523a0b66631f615f55e0d07796a..59f614225bcd72b8a84a5de42813359e9a2760d0 100644 (file)
@@ -28,6 +28,7 @@
 #ifndef _VMWGFX_DRV_H_
 #define _VMWGFX_DRV_H_
 
+#include "vmwgfx_validation.h"
 #include "vmwgfx_reg.h"
 #include <drm/drmP.h>
 #include <drm/vmwgfx_drm.h>
 #include <drm/drm_auth.h>
 #include <linux/suspend.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
-#include <drm/ttm/ttm_lock.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
+#include "ttm_object.h"
+#include "ttm_lock.h"
 #include <linux/sync_file.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
@@ -112,21 +113,49 @@ struct vmw_validate_buffer {
 };
 
 struct vmw_res_func;
+
+
+/**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex.
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
 struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
        int id;
-       bool avail;
        unsigned long backup_size;
-       bool res_dirty; /* Protected by backup buffer reserved */
-       bool backup_dirty; /* Protected by backup buffer reserved */
+       bool res_dirty;
+       bool backup_dirty;
        struct vmw_buffer_object *backup;
        unsigned long backup_offset;
-       unsigned long pin_count; /* Protected by resource reserved */
+       unsigned long pin_count;
        const struct vmw_res_func *func;
-       struct list_head lru_head; /* Protected by the resource lock */
-       struct list_head mob_head; /* Protected by @backup reserved */
-       struct list_head binding_head; /* Protected by binding_mutex */
+       struct list_head lru_head;
+       struct list_head mob_head;
+       struct list_head binding_head;
        void (*res_free) (struct vmw_resource *res);
        void (*hw_destroy) (struct vmw_resource *res);
 };
@@ -204,29 +233,24 @@ struct vmw_fifo_state {
        bool dx;
 };
 
-struct vmw_relocation {
-       SVGAMobId *mob_loc;
-       SVGAGuestPtr *location;
-       uint32_t index;
-};
-
 /**
  * struct vmw_res_cache_entry - resource information cache entry
- *
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @valid_handle: Whether the @handle member is valid.
  * @valid: Whether the entry is valid, which also implies that the execbuf
  * code holds a reference to the resource, and it's placed on the
  * validation list.
- * @handle: User-space handle of a resource.
- * @res: Non-ref-counted pointer to the resource.
  *
  * Used to avoid frequent repeated user-space handle lookups of the
  * same resource.
  */
 struct vmw_res_cache_entry {
-       bool valid;
        uint32_t handle;
        struct vmw_resource *res;
-       struct vmw_resource_val_node *node;
+       void *private;
+       unsigned short valid_handle;
+       unsigned short valid;
 };
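A hedged sketch of the fastpath this cache enables (the real check appears in vmw_cmd_res_check() later in the patch); the helper is illustrative only, with types as defined above.

/* Illustrative only: repeated handles skip lookup and list insertion. */
static inline bool
example_rcache_hit(const struct vmw_res_cache_entry *rcache, uint32_t handle)
{
        return rcache->valid_handle && rcache->handle == handle;
}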
 
 /**
@@ -291,35 +315,63 @@ enum vmw_display_unit_type {
        vmw_du_screen_target
 };
 
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
 
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @bo_relocations: List of buffer object relocations
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
 struct vmw_sw_context{
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
-       bool kernel; /**< is the called made from the kernel */
+       bool kernel;
        struct vmw_fpriv *fp;
-       struct list_head validate_nodes;
-       struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
-       uint32_t cur_reloc;
-       struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
-       uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
-       struct list_head resource_list;
-       struct list_head ctx_resource_list; /* For contexts and cotables */
        struct vmw_buffer_object *cur_query_bo;
+       struct list_head bo_relocations;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
-       struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state *staged_bindings;
        bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
-       struct vmw_resource_val_node *dx_ctx_node;
+       struct list_head ctx_list;
+       struct vmw_ctx_validation_info *dx_ctx_node;
        struct vmw_buffer_object *dx_query_mob;
        struct vmw_resource *dx_query_ctx;
        struct vmw_cmdbuf_res_manager *man;
+       struct vmw_validation_context *ctx;
 };
 
 struct vmw_legacy_display;
@@ -444,7 +496,7 @@ struct vmw_private {
         * Context and surface management.
         */
 
-       rwlock_t resource_lock;
+       spinlock_t resource_lock;
        struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                                bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle(
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
+extern struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+                                     struct ttm_object_file *tfile,
+                                     uint32_t handle,
+                                     const struct vmw_user_resource_conv *
+                                     converter);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -661,6 +719,15 @@ extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 
+/**
+ * vmw_user_resource_noref_release - release a user resource pointer looked up
+ * without reference
+ */
+static inline void vmw_user_resource_noref_release(void)
+{
+       ttm_base_object_noref_release();
+}
+
 /**
  * Buffer object helper functions - vmwgfx_bo.c
  */
@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_mem_reg *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+extern struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+
+/**
+ * vmw_user_bo_noref_release - release a buffer object pointer looked up
+ * without reference
+ */
+static inline void vmw_user_bo_noref_release(void)
+{
+       ttm_base_object_noref_release();
+}
+
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        uint32_t fence_handle,
                                        int32_t out_fence_fd,
                                        struct sync_file *sync_file);
-extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-                                     struct ttm_buffer_object *bo,
-                                     bool interruptible,
-                                     bool validate_as_mob);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
index f0ab6b2313bbed89f5879fd27a6e2c268fc8c613..5a6b70ba137aa321fe606946640464d861338087 100644 (file)
 
 #define VMW_RES_HT_ORDER 12
 
+/*
+ * struct vmw_relocation - Buffer object relocation
+ *
+ * @head: List head for the command submission context's relocation list
+ * @vbo: Non ref-counted pointer to buffer object
+ * @mob_loc: Pointer to location for mob id to be modified
+ * @location: Pointer to location for guest pointer to be modified
+ */
+struct vmw_relocation {
+       struct list_head head;
+       struct vmw_buffer_object *vbo;
+       union {
+               SVGAMobId *mob_loc;
+               SVGAGuestPtr *location;
+       };
+};
+
 /**
  * enum vmw_resource_relocation_type - Relocation type for resources
  *
@@ -69,35 +86,18 @@ struct vmw_resource_relocation {
        enum vmw_resource_relocation_type rel_type:3;
 };
 
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/*
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
  */
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
        struct list_head head;
-       struct drm_hash_item hash;
-       struct vmw_resource *res;
-       struct vmw_buffer_object *new_backup;
-       struct vmw_ctx_binding_state *staged_bindings;
-       unsigned long new_backup_offset;
-       u32 first_usage : 1;
-       u32 switching_backup : 1;
-       u32 no_buffer_needed : 1;
+       struct vmw_resource *ctx;
+       struct vmw_ctx_binding_state *cur;
+       struct vmw_ctx_binding_state *staged;
 };
 
 /**
@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_buffer_object *vbo,
-                                  bool validate_as_mob,
-                                  uint32_t *p_val_node);
 /**
  * vmw_ptr_diff - Compute the offset from a to b in bytes
  *
@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b)
 }
 
 /**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
  */
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
-                                   bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+                                       bool backoff)
 {
-       struct vmw_resource_val_node *val;
-       struct list_head *list = &sw_context->resource_list;
+       struct vmw_ctx_validation_info *entry;
 
-       if (sw_context->dx_query_mob && !backoff)
-               vmw_context_bind_dx_query(sw_context->dx_query_ctx,
-                                         sw_context->dx_query_mob);
+       list_for_each_entry(entry, &sw_context->ctx_list, head) {
+               if (!backoff)
+                       vmw_binding_state_commit(entry->cur, entry->staged);
+               if (entry->staged != sw_context->staged_bindings)
+                       vmw_binding_state_free(entry->staged);
+               else
+                       sw_context->staged_bindings_inuse = false;
+       }
 
-       list_for_each_entry(val, list, head) {
-               struct vmw_resource *res = val->res;
-               bool switch_backup =
-                       (backoff) ? false : val->switching_backup;
-
-               /*
-                * Transfer staged context bindings to the
-                * persistent context binding tracker.
-                */
-               if (unlikely(val->staged_bindings)) {
-                       if (!backoff) {
-                               vmw_binding_state_commit
-                                       (vmw_context_binding_state(val->res),
-                                        val->staged_bindings);
-                       }
+       /* List entries are freed with the validation context */
+       INIT_LIST_HEAD(&sw_context->ctx_list);
+}
 
-                       if (val->staged_bindings != sw_context->staged_bindings)
-                               vmw_binding_state_free(val->staged_bindings);
-                       else
-                               sw_context->staged_bindings_inuse = false;
-                       val->staged_bindings = NULL;
-               }
-               vmw_resource_unreserve(res, switch_backup, val->new_backup,
-                                      val->new_backup_offset);
-               vmw_bo_unreference(&val->new_backup);
-       }
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+       if (sw_context->dx_query_mob)
+               vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+                                         sw_context->dx_query_mob);
 }
 
 /**
@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
  * added to the validate list.
  *
  * @dev_priv: Pointer to the device private:
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @node: The validation node holding the context resource metadata
  */
 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
-                                  struct vmw_resource_val_node *node)
+                                  struct vmw_resource *res,
+                                  struct vmw_ctx_validation_info *node)
 {
        int ret;
 
-       ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+       ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
        if (unlikely(ret != 0))
                goto out_err;
 
@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
        }
 
        if (sw_context->staged_bindings_inuse) {
-               node->staged_bindings = vmw_binding_state_alloc(dev_priv);
-               if (IS_ERR(node->staged_bindings)) {
+               node->staged = vmw_binding_state_alloc(dev_priv);
+               if (IS_ERR(node->staged)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
-                       ret = PTR_ERR(node->staged_bindings);
-                       node->staged_bindings = NULL;
+                       ret = PTR_ERR(node->staged);
+                       node->staged = NULL;
                        goto out_err;
                }
        } else {
-               node->staged_bindings = sw_context->staged_bindings;
+               node->staged = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }
 
+       node->ctx = res;
+       node->cur = vmw_context_binding_state(res);
+       list_add_tail(&node->head, &sw_context->ctx_list);
+
        return 0;
 out_err:
        return ret;
 }
 
 /**
- * vmw_resource_val_add - Add a resource to the software context's
- * resource list if it's not already on it.
+ * vmw_execbuf_res_size - calculate extra size for the resource validation
+ * node
+ * @dev_priv: Pointer to the device private struct.
+ * @res_type: The resource type.
  *
- * @sw_context: Pointer to the software context.
+ * Guest-backed contexts and DX contexts require extra size to store
+ * execbuf private information in the validation node, typically the
+ * binding manager's associated data structures.
+ *
+ * Returns: The extra size requirement based on resource type.
+ */
+static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
+                                        enum vmw_res_type res_type)
+{
+       return (res_type == vmw_res_dx_context ||
+               (res_type == vmw_res_context && dev_priv->has_mob)) ?
+               sizeof(struct vmw_ctx_validation_info) : 0;
+}
+
+/**
+ * vmw_execbuf_rcache_update - Update a resource-node cache entry
+ *
+ * @rcache: Pointer to the entry to update.
  * @res: Pointer to the resource.
- * @p_node On successful return points to a valid pointer to a
- * struct vmw_resource_val_node, if non-NULL on entry.
+ * @private: Pointer to the execbuf-private space in the resource
+ * validation node.
+ */
+static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+                                     struct vmw_resource *res,
+                                     void *private)
+{
+       rcache->res = res;
+       rcache->private = private;
+       rcache->valid = 1;
+       rcache->valid_handle = 0;
+}
+
+/**
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an
+ * unreferenced rcu-protected pointer to the validation list.
+ * @sw_context: Pointer to the software context.
+ * @res: Unreferenced rcu-protected pointer to the resource.
+ *
+ * Returns: 0 on success. Negative error code on failure. Typical error
+ * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
+ * doomed.
  */
-static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
-                               struct vmw_resource *res,
-                               struct vmw_resource_val_node **p_node)
+static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+                                        struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
-       struct vmw_resource_val_node *node;
-       struct drm_hash_item *hash;
        int ret;
-
-       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
-                                   &hash) == 0)) {
-               node = container_of(hash, struct vmw_resource_val_node, hash);
-               node->first_usage = false;
-               if (unlikely(p_node != NULL))
-                       *p_node = node;
+       enum vmw_res_type res_type = vmw_res_type(res);
+       struct vmw_res_cache_entry *rcache;
+       struct vmw_ctx_validation_info *ctx_info;
+       bool first_usage;
+       unsigned int priv_size;
+
+       rcache = &sw_context->res_cache[res_type];
+       if (likely(rcache->valid && rcache->res == res)) {
+               vmw_user_resource_noref_release();
                return 0;
        }
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (unlikely(!node)) {
-               DRM_ERROR("Failed to allocate a resource validation "
-                         "entry.\n");
-               return -ENOMEM;
-       }
-
-       node->hash.key = (unsigned long) res;
-       ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to initialize a resource validation "
-                         "entry.\n");
-               kfree(node);
+       priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+       ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+                                         (void **)&ctx_info, &first_usage);
+       vmw_user_resource_noref_release();
+       if (ret)
                return ret;
+
+       if (priv_size && first_usage) {
+               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+                                             ctx_info);
+               if (ret)
+                       return ret;
        }
-       node->res = vmw_resource_reference(res);
-       node->first_usage = true;
-       if (unlikely(p_node != NULL))
-               *p_node = node;
 
-       if (!dev_priv->has_mob) {
-               list_add_tail(&node->head, &sw_context->resource_list);
+       vmw_execbuf_rcache_update(rcache, res, ctx_info);
+       return 0;
+}
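A sketch of the calling convention just established: the unreferenced, rcu-protected pointer returned by the noref handle lookup is consumed by vmw_execbuf_res_noref_val_add(), which ends the noref critical section on every path. The wrapper itself is hypothetical; compare vmw_cmd_res_check() further down.

/* Illustrative only: handle to validation list, without refcounting. */
static int example_add_by_handle(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t handle,
                                 const struct vmw_user_resource_conv *conv)
{
        struct vmw_resource *res;

        res = vmw_user_resource_noref_lookup_handle(dev_priv,
                                                    sw_context->fp->tfile,
                                                    handle, conv);
        if (IS_ERR(res))
                return PTR_ERR(res);

        /* Consumes the noref pointer; no unreference needed here. */
        return vmw_execbuf_res_noref_val_add(sw_context, res);
}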
+
+/**
+ * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+ * validation list if it's not already on it
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ *
+ * Returns: Zero on success. Negative error code on failure.
+ */
+static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+                                        struct vmw_resource *res)
+{
+       struct vmw_res_cache_entry *rcache;
+       enum vmw_res_type res_type = vmw_res_type(res);
+       void *ptr;
+       int ret;
+
+       rcache = &sw_context->res_cache[res_type];
+       if (likely(rcache->valid && rcache->res == res))
                return 0;
-       }
 
-       switch (vmw_res_type(res)) {
-       case vmw_res_context:
-       case vmw_res_dx_context:
-               list_add(&node->head, &sw_context->ctx_resource_list);
-               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
-               break;
-       case vmw_res_cotable:
-               list_add_tail(&node->head, &sw_context->ctx_resource_list);
-               break;
-       default:
-               list_add_tail(&node->head, &sw_context->resource_list);
-               break;
-       }
+       ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+       if (ret)
+               return ret;
 
-       return ret;
+       vmw_execbuf_rcache_update(rcache, res, ptr);
+
+       return 0;
 }
 
 /**
@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
-       ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+       ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
        if (ret)
                return ret;
 
-       return vmw_resource_val_add(sw_context, view, NULL);
+       return vmw_execbuf_res_noctx_val_add(sw_context, view);
 }
 
 /**
@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
  *
  * The view is represented by a view id and the DX context it's created on,
  * or scheduled for creation on. If there is no DX context set, the function
- * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ * will return an -EINVAL error pointer.
+ *
+ * Returns: Unreferenced pointer to the resource on success, negative error
+ * pointer on failure.
  */
-static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
-                              enum vmw_view_type view_type, u32 id)
+static struct vmw_resource *
+vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+                   enum vmw_view_type view_type, u32 id)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;
 
        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
-               return PTR_ERR(view);
+               return view;
 
        ret = vmw_view_res_val_add(sw_context, view);
-       vmw_resource_unreference(&view);
+       if (ret)
+               return ERR_PTR(ret);
 
-       return ret;
+       return view;
 }
 
 /**
@@ -394,8 +433,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                        if (IS_ERR(res))
                                continue;
 
-                       ret = vmw_resource_val_add(sw_context, res, NULL);
-                       vmw_resource_unreference(&res);
+                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
                        if (unlikely(ret != 0))
                                return ret;
                }
@@ -407,17 +445,11 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
        binding_list = vmw_context_binding_list(ctx);
 
        list_for_each_entry(entry, binding_list, ctx_list) {
-               /* entry->res is not refcounted */
-               res = vmw_resource_reference_unless_doomed(entry->res);
-               if (unlikely(res == NULL))
-                       continue;
-
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
-                       ret = vmw_resource_val_add(sw_context, entry->res,
-                                                  NULL);
-               vmw_resource_unreference(&res);
+                       ret = vmw_execbuf_res_noctx_val_add(sw_context,
+                                                           entry->res);
                if (unlikely(ret != 0))
                        break;
        }
@@ -427,9 +459,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 
                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
-                       ret = vmw_bo_to_validate_list(sw_context,
-                                                     dx_query_mob,
-                                                     true, NULL);
+                       ret = vmw_validation_add_bo(sw_context->ctx,
+                                                   dx_query_mob, true, false);
        }
 
        mutex_unlock(&dev_priv->binding_mutex);
@@ -445,7 +476,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
  * id that needs fixup is located. Granularity is one byte.
  * @rel_type: Relocation type.
  */
-static int vmw_resource_relocation_add(struct list_head *list,
+static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
@@ -453,7 +484,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 {
        struct vmw_resource_relocation *rel;
 
-       rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+       rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
        if (unlikely(!rel)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
@@ -462,7 +493,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
-       list_add_tail(&rel->head, list);
+       list_add_tail(&rel->head, &sw_context->res_relocations);
 
        return 0;
 }
@@ -470,16 +501,13 @@ static int vmw_resource_relocation_add(struct list_head *list,
 /**
  * vmw_resource_relocations_free - Free all relocations on a list
  *
- * @list: Pointer to the head of the relocation list.
+ * @list: Pointer to the head of the relocation list
  */
 static void vmw_resource_relocations_free(struct list_head *list)
 {
-       struct vmw_resource_relocation *rel, *n;
+       /* Memory is validation context memory, so no need to free it */
 
-       list_for_each_entry_safe(rel, n, list, head) {
-               list_del(&rel->head);
-               kfree(rel);
-       }
+       INIT_LIST_HEAD(list);
 }
 
 /**
@@ -531,68 +559,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
        return 0;
 }
 
-/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_buffer_object *vbo,
-                                  bool validate_as_mob,
-                                  uint32_t *p_val_node)
-{
-       uint32_t val_node;
-       struct vmw_validate_buffer *vval_buf;
-       struct ttm_validate_buffer *val_buf;
-       struct drm_hash_item *hash;
-       int ret;
-
-       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
-                                   &hash) == 0)) {
-               vval_buf = container_of(hash, struct vmw_validate_buffer,
-                                       hash);
-               if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
-                       DRM_ERROR("Inconsistent buffer usage.\n");
-                       return -EINVAL;
-               }
-               val_buf = &vval_buf->base;
-               val_node = vval_buf - sw_context->val_bufs;
-       } else {
-               val_node = sw_context->cur_val_buf;
-               if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-                       DRM_ERROR("Max number of DMA buffers per submission "
-                                 "exceeded.\n");
-                       return -EINVAL;
-               }
-               vval_buf = &sw_context->val_bufs[val_node];
-               vval_buf->hash.key = (unsigned long) vbo;
-               ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed to initialize a buffer validation "
-                                 "entry.\n");
-                       return ret;
-               }
-               ++sw_context->cur_val_buf;
-               val_buf = &vval_buf->base;
-               val_buf->bo = ttm_bo_reference(&vbo->base);
-               val_buf->shared = false;
-               list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-               vval_buf->validate_as_mob = validate_as_mob;
-       }
-
-       if (p_val_node)
-               *p_val_node = val_node;
-
-       return 0;
-}
-
 /**
  * vmw_resources_reserve - Reserve all resources on the sw_context's
  * resource list.
@@ -605,27 +571,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-       struct vmw_resource_val_node *val;
-       int ret = 0;
-
-       list_for_each_entry(val, &sw_context->resource_list, head) {
-               struct vmw_resource *res = val->res;
-
-               ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               if (res->backup) {
-                       struct vmw_buffer_object *vbo = res->backup;
-
-                       ret = vmw_bo_to_validate_list
-                               (sw_context, vbo,
-                                vmw_resource_needs_backup(res), NULL);
+       int ret;
 
-                       if (unlikely(ret != 0))
-                               return ret;
-               }
-       }
+       ret = vmw_validation_res_reserve(sw_context->ctx, true);
+       if (ret)
+               return ret;
 
        if (sw_context->dx_query_mob) {
                struct vmw_buffer_object *expected_dx_query_mob;
@@ -641,87 +591,6 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
        return ret;
 }
 
-/**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
-       struct vmw_resource_val_node *val;
-       int ret;
-
-       list_for_each_entry(val, &sw_context->resource_list, head) {
-               struct vmw_resource *res = val->res;
-               struct vmw_buffer_object *backup = res->backup;
-
-               ret = vmw_resource_validate(res);
-               if (unlikely(ret != 0)) {
-                       if (ret != -ERESTARTSYS)
-                               DRM_ERROR("Failed to validate resource.\n");
-                       return ret;
-               }
-
-               /* Check if the resource switched backup buffer */
-               if (backup && res->backup && (backup != res->backup)) {
-                       struct vmw_buffer_object *vbo = res->backup;
-
-                       ret = vmw_bo_to_validate_list
-                               (sw_context, vbo,
-                                vmw_resource_needs_backup(res), NULL);
-                       if (ret) {
-                               ttm_bo_unreserve(&vbo->base);
-                               return ret;
-                       }
-               }
-       }
-       return 0;
-}
-
-/**
- * vmw_cmd_res_reloc_add - Add a resource to a software context's
- * relocation- and validation lists.
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @sw_context: Pointer to the software context.
- * @id_loc: Pointer to where the id that needs translation is located.
- * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
- */
-static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
-                                struct vmw_sw_context *sw_context,
-                                uint32_t *id_loc,
-                                struct vmw_resource *res,
-                                struct vmw_resource_val_node **p_val)
-{
-       int ret;
-       struct vmw_resource_val_node *node;
-
-       *p_val = NULL;
-       ret = vmw_resource_relocation_add(&sw_context->res_relocations,
-                                         res,
-                                         vmw_ptr_diff(sw_context->buf_start,
-                                                      id_loc),
-                                         vmw_res_rel_normal);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = vmw_resource_val_add(sw_context, res, &node);
-       if (unlikely(ret != 0))
-               return ret;
-
-       if (p_val)
-               *p_val = node;
-
-       return 0;
-}
-
-
 /**
  * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
@@ -741,17 +610,16 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
-                 struct vmw_resource_val_node **p_val)
+                 struct vmw_resource **p_res)
 {
-       struct vmw_res_cache_entry *rcache =
-               &sw_context->res_cache[res_type];
+       struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
-       struct vmw_resource_val_node *node;
        int ret;
 
+       if (p_res)
+               *p_res = NULL;
+
        if (*id_loc == SVGA3D_INVALID_ID) {
-               if (p_val)
-                       *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
@@ -759,56 +627,41 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                return 0;
        }
 
-       /*
-        * Fastpath in case of repeated commands referencing the same
-        * resource
-        */
+       if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+               res = rcache->res;
+       } else {
+               unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 
-       if (likely(rcache->valid && *id_loc == rcache->handle)) {
-               const struct vmw_resource *res = rcache->res;
+               ret = vmw_validation_preload_res(sw_context->ctx, size);
+               if (ret)
+                       return ret;
 
-               rcache->node->first_usage = false;
-               if (p_val)
-                       *p_val = rcache->node;
+               res = vmw_user_resource_noref_lookup_handle
+                       (dev_priv, sw_context->fp->tfile, *id_loc, converter);
+               if (unlikely(IS_ERR(res))) {
+                       DRM_ERROR("Could not find or use resource 0x%08x.\n",
+                                 (unsigned int) *id_loc);
+                       return PTR_ERR(res);
+               }
 
-               return vmw_resource_relocation_add
-                       (&sw_context->res_relocations, res,
-                        vmw_ptr_diff(sw_context->buf_start, id_loc),
-                        vmw_res_rel_normal);
-       }
+               ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+               if (unlikely(ret != 0))
+                       return ret;
 
-       ret = vmw_user_resource_lookup_handle(dev_priv,
-                                             sw_context->fp->tfile,
-                                             *id_loc,
-                                             converter,
-                                             &res);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not find or use resource 0x%08x.\n",
-                         (unsigned) *id_loc);
-               dump_stack();
-               return ret;
+               if (rcache->valid && rcache->res == res) {
+                       rcache->valid_handle = true;
+                       rcache->handle = *id_loc;
+               }
        }
 
-       rcache->valid = true;
-       rcache->res = res;
-       rcache->handle = *id_loc;
-
-       ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
-                                   res, &node);
-       if (unlikely(ret != 0))
-               goto out_no_reloc;
+       ret = vmw_resource_relocation_add(sw_context, res,
+                                         vmw_ptr_diff(sw_context->buf_start,
+                                                      id_loc),
+                                         vmw_res_rel_normal);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (p_res)
+               *p_res = res;
 
-       rcache->node = node;
-       if (p_val)
-               *p_val = node;
-       vmw_resource_unreference(&res);
        return 0;
-
-out_no_reloc:
-       BUG_ON(sw_context->error_resource != NULL);
-       sw_context->error_resource = res;
-
-       return ret;
 }
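
The reworked lookup above leans on a per-type cache slot so that repeated commands referencing the same handle skip the full translation. In reduced, self-contained form (illustrative names only, not driver API), the fastpath pattern is:

#include <stdbool.h>

/* One cache slot per resource type, as in sw_context->res_cache[]. */
struct handle_cache {
	bool valid_handle;      /* the cached handle may be trusted */
	unsigned int handle;    /* last user-space handle seen */
	void *res;              /* object that handle resolved to */
};

static void *lookup_cached(struct handle_cache *c, unsigned int handle,
			   void *(*slow_lookup)(unsigned int))
{
	void *res;

	/* Fastpath: same handle as the previous command. */
	if (c->valid_handle && handle == c->handle)
		return c->res;

	/* Slowpath: full handle translation, then refill the cache. */
	res = slow_lookup(handle);
	if (res) {
		c->valid_handle = true;
		c->handle = handle;
		c->res = res;
	}
	return res;
}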
 
 /**
@@ -861,22 +714,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
  */
 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 {
-       struct vmw_resource_val_node *val;
+       struct vmw_ctx_validation_info *val;
        int ret;
 
-       list_for_each_entry(val, &sw_context->resource_list, head) {
-               if (unlikely(!val->staged_bindings))
-                       break;
-
-               ret = vmw_binding_rebind_all
-                       (vmw_context_binding_state(val->res));
+       list_for_each_entry(val, &sw_context->ctx_list, head) {
+               ret = vmw_binding_rebind_all(val->cur);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }
 
-               ret = vmw_rebind_all_dx_query(val->res);
+               ret = vmw_rebind_all_dx_query(val->ctx);
                if (ret != 0)
                        return ret;
        }
@@ -903,45 +752,33 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
-       struct vmw_cmdbuf_res_manager *man;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        u32 i;
-       int ret;
 
        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }
 
-       man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;
 
                if (view_ids[i] != SVGA3D_INVALID_ID) {
-                       view = vmw_view_lookup(man, view_type, view_ids[i]);
+                       view = vmw_view_id_val_add(sw_context, view_type,
+                                                  view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }
-
-                       ret = vmw_view_res_val_add(sw_context, view);
-                       if (ret) {
-                               DRM_ERROR("Could not add view to "
-                                         "validation list.\n");
-                               vmw_resource_unreference(&view);
-                               return ret;
-                       }
                }
-               binding.bi.ctx = ctx_node->res;
+               binding.bi.ctx = ctx_node->ctx;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
-               vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+               vmw_binding_add(ctx_node->staged, &binding.bi,
                                shader_slot, binding.slot);
-               if (view)
-                       vmw_resource_unreference(&view);
        }
 
        return 0;
@@ -971,6 +808,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                                 user_context_converter, &cmd->cid, NULL);
 }
 
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command
+ * submission context's resource cache, and hence must be the last resource
+ * of that type to have been processed by the validation code.
+ *
+ * Return: A pointer to the private metadata of the resource, or NULL
+ * if it wasn't found.
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+                         struct vmw_resource *res)
+{
+       struct vmw_res_cache_entry *rcache =
+               &sw_context->res_cache[vmw_res_type(res)];
+
+       if (rcache->valid && rcache->res == res)
+               return rcache->private;
+
+       WARN_ON_ONCE(true);
+       return NULL;
+}
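
For readers tracking where rcache->private comes from: validation records per-resource metadata in the type's cache slot, and the helper above may only read it back while that resource is still the most recently validated one of its type. A reduced sketch with illustrative names, not the driver's actual structures:

struct info_cache {
	bool valid;
	void *res;          /* most recently validated resource of this type */
	void *private;      /* metadata recorded when res was validated */
};

/* Called by the validation path when a resource is (re)validated. */
static void info_cache_record(struct info_cache *c, void *res, void *private)
{
	c->valid = true;
	c->res = res;
	c->private = private;
}

/* Called by command checkers; NULL means the invariant was violated. */
static void *info_cache_lookup(struct info_cache *c, void *res)
{
	return (c->valid && c->res == res) ? c->private : NULL;
}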
+
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
@@ -979,8 +844,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
-       struct vmw_resource_val_node *ctx_node;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *ctx;
+       struct vmw_resource *res;
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +858,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
-                               &ctx_node);
+                               &ctx);
        if (unlikely(ret != 0))
                return ret;
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                               user_surface_converter,
-                               &cmd->body.target.sid, &res_node);
-       if (unlikely(ret != 0))
+                               user_surface_converter, &cmd->body.target.sid,
+                               &res);
+       if (unlikely(ret))
                return ret;
 
        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;
+               struct vmw_ctx_validation_info *node;
 
-               binding.bi.ctx = ctx_node->res;
-               binding.bi.res = res_node ? res_node->res : NULL;
+               node = vmw_execbuf_info_from_res(sw_context, ctx);
+               if (!node)
+                       return -EINVAL;
+
+               binding.bi.ctx = ctx;
+               binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
-               vmw_binding_add(ctx_node->staged_bindings,
-                               &binding.bi, 0, binding.slot);
+               vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
        }
 
        return 0;
@@ -1030,8 +899,8 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
        cmd = container_of(header, struct vmw_sid_cmd, header);
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                               user_surface_converter,
-                               &cmd->body.src.sid, NULL);
+                               user_surface_converter,
+                               &cmd->body.src.sid, NULL);
        if (ret)
                return ret;
 
@@ -1171,17 +1040,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
-                       ret = vmw_bo_to_validate_list(sw_context,
-                                                     sw_context->cur_query_bo,
-                                                     dev_priv->has_mob, NULL);
+                       ret = vmw_validation_add_bo(sw_context->ctx,
+                                                   sw_context->cur_query_bo,
+                                                   dev_priv->has_mob, false);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;
 
-               ret = vmw_bo_to_validate_list(sw_context,
-                                             dev_priv->dummy_query_bo,
-                                             dev_priv->has_mob, NULL);
+               ret = vmw_validation_add_bo(sw_context->ctx,
+                                           dev_priv->dummy_query_bo,
+                                           dev_priv->has_mob, false);
                if (unlikely(ret != 0))
                        return ret;
 
@@ -1269,7 +1138,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command batch validation.
  * @id: Pointer to the user-space handle to be translated.
  * @vmw_bo_p: Points to a location that, on successful return, will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the buffer object identified by the
  * user-space handle in @id.
  *
  * This function saves information needed to translate a user-space buffer
@@ -1284,40 +1153,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 SVGAMobId *id,
                                 struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_buffer_object *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
-       if (unlikely(ret != 0)) {
+       vmw_validation_preload_bo(sw_context->ctx);
+       vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+       if (IS_ERR(vmw_bo)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
-               ret = -EINVAL;
-               goto out_no_reloc;
+               return PTR_ERR(vmw_bo);
        }
 
-       if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-               DRM_ERROR("Max number relocations per submission"
-                         " exceeded\n");
-               ret = -EINVAL;
-               goto out_no_reloc;
-       }
+       ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+       vmw_user_bo_noref_release();
+       if (unlikely(ret != 0))
+               return ret;
 
-       reloc = &sw_context->relocs[sw_context->cur_reloc++];
-       reloc->mob_loc = id;
-       reloc->location = NULL;
+       reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+       if (!reloc)
+               return -ENOMEM;
 
-       ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
-       if (unlikely(ret != 0))
-               goto out_no_reloc;
+       reloc->mob_loc = id;
+       reloc->vbo = vmw_bo;
 
        *vmw_bo_p = vmw_bo;
-       return 0;
+       list_add_tail(&reloc->head, &sw_context->bo_relocations);
 
-out_no_reloc:
-       vmw_bo_unreference(&vmw_bo);
-       *vmw_bo_p = NULL;
-       return ret;
+       return 0;
 }
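
The relocation bookkeeping above replaces a fixed-size array (and its VMWGFX_MAX_RELOCATIONS limit) with records allocated from validation-context memory and chained on a list. A minimal sketch of that pattern, with hypothetical names standing in for the driver's helpers:

#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative relocation record, patterned after struct vmw_relocation. */
struct reloc {
	struct list_head head;  /* entry on the submission's relocation list */
	unsigned int *mob_loc;  /* command-stream location to patch later */
	void *vbo;              /* buffer object the location refers to */
};

/* kzalloc() stands in for vmw_validation_mem_alloc(); in the driver the
 * memory lives as long as the validation context and is freed in bulk. */
static int reloc_add(struct list_head *relocations, unsigned int *mob_loc,
		     void *vbo)
{
	struct reloc *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;

	r->mob_loc = mob_loc;
	r->vbo = vbo;
	list_add_tail(&r->head, relocations);
	return 0;
}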
 
 /**
@@ -1328,7 +1191,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command batch validation.
  * @ptr: Pointer to the user-space handle to be translated.
  * @vmw_bo_p: Points to a location that, on successful return, will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the DMA buffer identified by the
  * user-space handle in @ptr.
  *
  * This function saves information needed to translate a user-space buffer
@@ -1344,39 +1207,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_buffer_object *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
-       if (unlikely(ret != 0)) {
+       vmw_validation_preload_bo(sw_context->ctx);
+       vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+       if (IS_ERR(vmw_bo)) {
                DRM_ERROR("Could not find or use GMR region.\n");
-               ret = -EINVAL;
-               goto out_no_reloc;
-       }
-
-       if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-               DRM_ERROR("Max number relocations per submission"
-                         " exceeded\n");
-               ret = -EINVAL;
-               goto out_no_reloc;
+               return PTR_ERR(vmw_bo);
        }
 
-       reloc = &sw_context->relocs[sw_context->cur_reloc++];
-       reloc->location = ptr;
-
-       ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+       ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+       vmw_user_bo_noref_release();
        if (unlikely(ret != 0))
-               goto out_no_reloc;
+               return ret;
+
+       reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+       if (!reloc)
+               return -ENOMEM;
 
+       reloc->location = ptr;
+       reloc->vbo = vmw_bo;
        *vmw_bo_p = vmw_bo;
-       return 0;
+       list_add_tail(&reloc->head, &sw_context->bo_relocations);
 
-out_no_reloc:
-       vmw_bo_unreference(&vmw_bo);
-       *vmw_bo_p = NULL;
-       return ret;
+       return 0;
 }
 
 
@@ -1400,7 +1257,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
        } *cmd;
 
        int    ret;
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;
 
 
@@ -1415,9 +1272,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;
 
-       cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+       cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
-       vmw_resource_unreference(&cotable_res);
 
        return ret;
 }
@@ -1462,11 +1318,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                return ret;
 
        sw_context->dx_query_mob = vmw_bo;
-       sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
-
-       vmw_bo_unreference(&vmw_bo);
-
-       return ret;
+       sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
+       return 0;
 }
 
 
@@ -1567,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1621,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1654,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1706,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1757,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        DRM_ERROR("could not find surface for DMA.\n");
-               goto out_no_surface;
+               return ret;
        }
 
        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
@@ -1765,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
                             header);
 
-out_no_surface:
-       vmw_bo_unreference(&vmw_bo);
-       return ret;
+       return 0;
 }
 
 static int vmw_cmd_draw(struct vmw_private *dev_priv,
@@ -1837,8 +1684,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
-       struct vmw_resource_val_node *ctx_node;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *ctx;
+       struct vmw_resource *res;
        int ret;
 
        cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1693,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->state.cid,
-                               &ctx_node);
+                               &ctx);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1862,19 +1709,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
-                                       &cur_state->value, &res_node);
+                                       &cur_state->value, &res);
                if (unlikely(ret != 0))
                        return ret;
 
                if (dev_priv->has_mob) {
                        struct vmw_ctx_bindinfo_tex binding;
+                       struct vmw_ctx_validation_info *node;
+
+                       node = vmw_execbuf_info_from_res(sw_context, ctx);
+                       if (!node)
+                               return -EINVAL;
 
-                       binding.bi.ctx = ctx_node->res;
-                       binding.bi.res = res_node ? res_node->res : NULL;
+                       binding.bi.ctx = ctx;
+                       binding.bi.res = res;
                        binding.bi.bt = vmw_ctx_binding_tex;
                        binding.texture_stage = cur_state->stage;
-                       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
-                                       0, binding.texture_stage);
+                       vmw_binding_add(node->staged, &binding.bi, 0,
+                                       binding.texture_stage);
                }
        }
 
@@ -1893,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;
 
-       ret = vmw_translate_guest_ptr(dev_priv, sw_context,
-                                     &cmd->body.ptr,
-                                     &vmw_bo);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_bo_unreference(&vmw_bo);
-
-       return ret;
+       return vmw_translate_guest_ptr(dev_priv, sw_context,
+                                      &cmd->body.ptr,
+                                      &vmw_bo);
 }
 
@@ -1922,25 +1769,24 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  */
 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
-                                    struct vmw_resource_val_node *val_node,
+                                    struct vmw_resource *res,
                                     uint32_t *buf_id,
                                     unsigned long backup_offset)
 {
-       struct vmw_buffer_object *dma_buf;
+       struct vmw_buffer_object *vbo;
+       void *info;
        int ret;
 
-       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+       info = vmw_execbuf_info_from_res(sw_context, res);
+       if (!info)
+               return -EINVAL;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
        if (ret)
                return ret;
 
-       val_node->switching_backup = true;
-       if (val_node->first_usage)
-               val_node->no_buffer_needed = true;
-
-       vmw_bo_unreference(&val_node->new_backup);
-       val_node->new_backup = dma_buf;
-       val_node->new_backup_offset = backup_offset;
-
+       vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+                                        backup_offset);
        return 0;
 }
 
@@ -1970,15 +1816,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 uint32_t *buf_id,
                                 unsigned long backup_offset)
 {
-       struct vmw_resource_val_node *val_node;
+       struct vmw_resource *res;
        int ret;
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
-                               converter, res_id, &val_node);
+                               converter, res_id, &res);
        if (ret)
                return ret;
 
-       return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+       return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
                                         buf_id, backup_offset);
 }
 
@@ -2170,14 +2016,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
        } *cmd;
        int ret;
        size_t size;
-       struct vmw_resource_val_node *val;
+       struct vmw_resource *ctx;
 
        cmd = container_of(header, struct vmw_shader_define_cmd,
                           header);
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
-                               &val);
+                               &ctx);
        if (unlikely(ret != 0))
                return ret;
 
@@ -2186,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 
        size = cmd->header.size - sizeof(cmd->body);
        ret = vmw_compat_shader_add(dev_priv,
-                                   vmw_context_res_man(val->res),
+                                   vmw_context_res_man(ctx),
                                    cmd->body.shid, cmd + 1,
                                    cmd->body.type, size,
                                    &sw_context->staged_cmd_res);
        if (unlikely(ret != 0))
                return ret;
 
-       return vmw_resource_relocation_add(&sw_context->res_relocations,
+       return vmw_resource_relocation_add(sw_context,
                                           NULL,
                                           vmw_ptr_diff(sw_context->buf_start,
                                                        &cmd->header.id),
@@ -2217,28 +2063,28 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
                SVGA3dCmdDestroyShader body;
        } *cmd;
        int ret;
-       struct vmw_resource_val_node *val;
+       struct vmw_resource *ctx;
 
        cmd = container_of(header, struct vmw_shader_destroy_cmd,
                           header);
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
-                               &val);
+                               &ctx);
        if (unlikely(ret != 0))
                return ret;
 
        if (unlikely(!dev_priv->has_mob))
                return 0;
 
-       ret = vmw_shader_remove(vmw_context_res_man(val->res),
+       ret = vmw_shader_remove(vmw_context_res_man(ctx),
                                cmd->body.shid,
                                cmd->body.type,
                                &sw_context->staged_cmd_res);
        if (unlikely(ret != 0))
                return ret;
 
-       return vmw_resource_relocation_add(&sw_context->res_relocations,
+       return vmw_resource_relocation_add(sw_context,
                                           NULL,
                                           vmw_ptr_diff(sw_context->buf_start,
                                                        &cmd->header.id),
@@ -2261,9 +2107,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;
-       struct vmw_resource_val_node *ctx_node, *res_node = NULL;
        struct vmw_ctx_bindinfo_shader binding;
-       struct vmw_resource *res = NULL;
+       struct vmw_resource *ctx, *res = NULL;
+       struct vmw_ctx_validation_info *ctx_info;
        int ret;
 
        cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2123,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
-                               &ctx_node);
+                               &ctx);
        if (unlikely(ret != 0))
                return ret;
 
@@ -2285,34 +2131,35 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                return 0;
 
        if (cmd->body.shid != SVGA3D_INVALID_ID) {
-               res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+               res = vmw_shader_lookup(vmw_context_res_man(ctx),
                                        cmd->body.shid,
                                        cmd->body.type);
 
                if (!IS_ERR(res)) {
-                       ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-                                                   &cmd->body.shid, res,
-                                                   &res_node);
-                       vmw_resource_unreference(&res);
+                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }
 
-       if (!res_node) {
+       if (IS_ERR_OR_NULL(res)) {
                ret = vmw_cmd_res_check(dev_priv, sw_context,
                                        vmw_res_shader,
                                        user_shader_converter,
-                                       &cmd->body.shid, &res_node);
+                                       &cmd->body.shid, &res);
                if (unlikely(ret != 0))
                        return ret;
        }
 
-       binding.bi.ctx = ctx_node->res;
-       binding.bi.res = res_node ? res_node->res : NULL;
+       ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+       if (!ctx_info)
+               return -EINVAL;
+
+       binding.bi.ctx = ctx;
+       binding.bi.res = res;
        binding.bi.bt = vmw_ctx_binding_shader;
        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
-       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+       vmw_binding_add(ctx_info->staged, &binding.bi,
                        binding.shader_slot, 0);
        return 0;
 }
@@ -2393,8 +2240,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSingleConstantBuffer body;
        } *cmd;
-       struct vmw_resource_val_node *res_node = NULL;
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource *res = NULL;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_ctx_bindinfo_cb binding;
        int ret;
 
@@ -2406,12 +2253,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
-                               &cmd->body.sid, &res_node);
+                               &cmd->body.sid, &res);
        if (unlikely(ret != 0))
                return ret;
 
-       binding.bi.ctx = ctx_node->res;
-       binding.bi.res = res_node ? res_node->res : NULL;
+       binding.bi.ctx = ctx_node->ctx;
+       binding.bi.res = res;
        binding.bi.bt = vmw_ctx_binding_cb;
        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
        binding.offset = cmd->body.offsetInBytes;
@@ -2426,7 +2273,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
                return -EINVAL;
        }
 
-       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+       vmw_binding_add(ctx_node->staged, &binding.bi,
                        binding.shader_slot, binding.slot);
 
        return 0;
@@ -2482,7 +2329,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                SVGA3dCmdDXSetShader body;
        } *cmd;
        struct vmw_resource *res = NULL;
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_ctx_bindinfo_shader binding;
        int ret = 0;
 
@@ -2506,23 +2353,20 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                        return PTR_ERR(res);
                }
 
-               ret = vmw_resource_val_add(sw_context, res, NULL);
+               ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
                if (ret)
-                       goto out_unref;
+                       return ret;
        }
 
-       binding.bi.ctx = ctx_node->res;
+       binding.bi.ctx = ctx_node->ctx;
        binding.bi.res = res;
        binding.bi.bt = vmw_ctx_binding_dx_shader;
        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
 
-       vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+       vmw_binding_add(ctx_node->staged, &binding.bi,
                        binding.shader_slot, 0);
-out_unref:
-       if (res)
-               vmw_resource_unreference(&res);
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -2537,9 +2381,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_ctx_bindinfo_vb binding;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *res;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetVertexBuffers body;
@@ -2564,18 +2408,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
        for (i = 0; i < num; i++) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
-                                       &cmd->buf[i].sid, &res_node);
+                                       &cmd->buf[i].sid, &res);
                if (unlikely(ret != 0))
                        return ret;
 
-               binding.bi.ctx = ctx_node->res;
+               binding.bi.ctx = ctx_node->ctx;
                binding.bi.bt = vmw_ctx_binding_vb;
-               binding.bi.res = ((res_node) ? res_node->res : NULL);
+               binding.bi.res = res;
                binding.offset = cmd->buf[i].offset;
                binding.stride = cmd->buf[i].stride;
                binding.slot = i + cmd->body.startBuffer;
 
-               vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+               vmw_binding_add(ctx_node->staged, &binding.bi,
                                0, binding.slot);
        }
 
@@ -2594,9 +2438,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_ctx_bindinfo_ib binding;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *res;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetIndexBuffer body;
@@ -2611,17 +2455,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
-                               &cmd->body.sid, &res_node);
+                               &cmd->body.sid, &res);
        if (unlikely(ret != 0))
                return ret;
 
-       binding.bi.ctx = ctx_node->res;
-       binding.bi.res = ((res_node) ? res_node->res : NULL);
+       binding.bi.ctx = ctx_node->ctx;
+       binding.bi.res = res;
        binding.bi.bt = vmw_ctx_binding_ib;
        binding.offset = cmd->body.offset;
        binding.format = cmd->body.format;
 
-       vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+       vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
 
        return 0;
 }
@@ -2679,8 +2523,8 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
                SVGA3dCmdDXClearRenderTargetView body;
        } *cmd = container_of(header, typeof(*cmd), header);
 
-       return vmw_view_id_val_add(sw_context, vmw_view_rt,
-                                  cmd->body.renderTargetViewId);
+       return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
+                                          cmd->body.renderTargetViewId));
 }
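
PTR_RET() here is the (since-renamed) PTR_ERR_OR_ZERO() helper: it collapses the pointer-or-ERR_PTR value returned by vmw_view_id_val_add() into the 0-or-negative-errno form the command checkers return. Its behavior is equivalent to this sketch:

#include <linux/err.h>

static inline int ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);    /* ERR_PTR(-E...) -> -E... */

	return 0;                       /* usable pointer -> success */
}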
 
 /**
@@ -2700,16 +2544,16 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
                SVGA3dCmdDXClearDepthStencilView body;
        } *cmd = container_of(header, typeof(*cmd), header);
 
-       return vmw_view_id_val_add(sw_context, vmw_view_ds,
-                                  cmd->body.depthStencilViewId);
+       return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
+                                          cmd->body.depthStencilViewId));
 }
 
 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
-       struct vmw_resource_val_node *srf_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_resource *srf;
        struct vmw_resource *res;
        enum vmw_view_type view_type;
        int ret;
@@ -2734,19 +2578,18 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
-                               &cmd->sid, &srf_node);
+                               &cmd->sid, &srf);
        if (unlikely(ret != 0))
                return ret;
 
-       res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+       res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
        ret = vmw_cotable_notify(res, cmd->defined_id);
-       vmw_resource_unreference(&res);
        if (unlikely(ret != 0))
                return ret;
 
        return vmw_view_add(sw_context->man,
-                           ctx_node->res,
-                           srf_node->res,
+                           ctx_node->ctx,
+                           srf,
                            view_type,
                            cmd->defined_id,
                            header,
@@ -2766,9 +2609,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_ctx_bindinfo_so binding;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *res;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSOTargets body;
@@ -2793,18 +2636,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
        for (i = 0; i < num; i++) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
-                                       &cmd->targets[i].sid, &res_node);
+                                       &cmd->targets[i].sid, &res);
                if (unlikely(ret != 0))
                        return ret;
 
-               binding.bi.ctx = ctx_node->res;
-               binding.bi.res = ((res_node) ? res_node->res : NULL);
+               binding.bi.ctx = ctx_node->ctx;
+               binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_so,
                binding.offset = cmd->targets[i].offset;
                binding.size = cmd->targets[i].sizeInBytes;
                binding.slot = i;
 
-               vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+               vmw_binding_add(ctx_node->staged, &binding.bi,
                                0, binding.slot);
        }
 
@@ -2815,7 +2658,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *res;
        /*
         * This is based on the fact that all affected define commands have
@@ -2834,10 +2677,9 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
        }
 
        so_type = vmw_so_cmd_to_type(header->id);
-       res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+       res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cotable_notify(res, cmd->defined_id);
-       vmw_resource_unreference(&res);
 
        return ret;
 }
@@ -2882,7 +2724,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 
        if (unlikely(ctx_node == NULL)) {
                DRM_ERROR("DX Context not set.\n");
@@ -2907,7 +2749,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct {
                SVGA3dCmdHeader header;
                union vmw_view_destroy body;
@@ -2934,7 +2776,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
         * relocation to conditionally make this command a NOP to avoid
         * device errors.
         */
-       return vmw_resource_relocation_add(&sw_context->res_relocations,
+       return vmw_resource_relocation_add(sw_context,
                                           view,
                                           vmw_ptr_diff(sw_context->buf_start,
                                                        &cmd->header.id),
@@ -2953,7 +2795,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
                                    struct vmw_sw_context *sw_context,
                                    SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *res;
        struct {
                SVGA3dCmdHeader header;
@@ -2966,13 +2808,12 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
                return -EINVAL;
        }
 
-       res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+       res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
        ret = vmw_cotable_notify(res, cmd->body.shaderId);
-       vmw_resource_unreference(&res);
        if (ret)
                return ret;
 
-       return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+       return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
                                 cmd->body.shaderId, cmd->body.type,
                                 &sw_context->staged_cmd_res);
 }
@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+       struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyShader body;
@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
 {
-       struct vmw_resource_val_node *ctx_node;
-       struct vmw_resource_val_node *res_node;
+       struct vmw_resource *ctx;
        struct vmw_resource *res;
        struct {
                SVGA3dCmdHeader header;
@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
        if (cmd->body.cid != SVGA3D_INVALID_ID) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                        user_context_converter,
-                                       &cmd->body.cid, &ctx_node);
+                                       &cmd->body.cid, &ctx);
                if (ret)
                        return ret;
        } else {
-               ctx_node = sw_context->dx_ctx_node;
-               if (!ctx_node) {
+               if (!sw_context->dx_ctx_node) {
                        DRM_ERROR("DX Context not set.\n");
                        return -EINVAL;
                }
+               ctx = sw_context->dx_ctx_node->ctx;
        }
 
-       res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+       res = vmw_shader_lookup(vmw_context_res_man(ctx),
                                cmd->body.shid, 0);
        if (IS_ERR(res)) {
                DRM_ERROR("Could not find shader to bind.\n");
                return PTR_ERR(res);
        }
 
-       ret = vmw_resource_val_add(sw_context, res, &res_node);
+       ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
-               goto out_unref;
+               return ret;
        }
 
-
-       ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
-                                       &cmd->body.mobid,
-                                       cmd->body.offsetInBytes);
-out_unref:
-       vmw_resource_unreference(&res);
-
-       return ret;
+       return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+                                        &cmd->body.mobid,
+                                        cmd->body.offsetInBytes);
 }
 
 /**
@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
                SVGA3dCmdDXGenMips body;
        } *cmd = container_of(header, typeof(*cmd), header);
 
-       return vmw_view_id_val_add(sw_context, vmw_view_sr,
-                                  cmd->body.shaderResourceViewId);
+       return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
+                                          cmd->body.shaderResourceViewId));
 }
 
 /**
@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 
 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
 {
-       sw_context->cur_reloc = 0;
+       /* Memory is validation context memory, so no need to free it */
+
+       INIT_LIST_HEAD(&sw_context->bo_relocations);
 }
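
The comment in vmw_free_relocations() is worth unpacking: allocations made through the validation context behave like arena memory, released in one sweep when the context is torn down, so relocation records never need individual freeing. A hedged sketch of that idea (not the driver's actual allocator):

#include <linux/list.h>
#include <linux/slab.h>

struct val_mem_node {
	struct list_head head;  /* chained on the owning context */
	/* caller payload follows this header */
};

struct val_mem_ctx {
	struct list_head nodes; /* every allocation made for this context */
};

static void *val_mem_alloc(struct val_mem_ctx *ctx, size_t size)
{
	struct val_mem_node *n = kmalloc(sizeof(*n) + size, GFP_KERNEL);

	if (!n)
		return NULL;

	list_add_tail(&n->head, &ctx->nodes);
	return n + 1;           /* payload starts right after the header */
}

static void val_mem_release(struct val_mem_ctx *ctx)
{
	struct val_mem_node *n, *tmp;

	/* One sweep frees everything; callers never free individually. */
	list_for_each_entry_safe(n, tmp, &ctx->nodes, head) {
		list_del(&n->head);
		kfree(n);
	}
}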
 
 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 {
-       uint32_t i;
        struct vmw_relocation *reloc;
-       struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;
 
-       for (i = 0; i < sw_context->cur_reloc; ++i) {
-               reloc = &sw_context->relocs[i];
-               validate = &sw_context->val_bufs[reloc->index].base;
-               bo = validate->bo;
+       list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
+               bo = &reloc->vbo->base;
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
        vmw_free_relocations(sw_context);
 }
 
-/**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
-static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
-                                         struct list_head *list)
-{
-       struct vmw_resource_val_node *val, *val_next;
-
-       /*
-        * Drop references to resources held during command submission.
-        */
-
-       list_for_each_entry_safe(val, val_next, list, head) {
-               list_del_init(&val->head);
-               vmw_resource_unreference(&val->res);
-
-               if (val->staged_bindings) {
-                       if (val->staged_bindings != sw_context->staged_bindings)
-                               vmw_binding_state_free(val->staged_bindings);
-                       else
-                               sw_context->staged_bindings_inuse = false;
-                       val->staged_bindings = NULL;
-               }
-
-               kfree(val);
-       }
-}
-
-static void vmw_clear_validations(struct vmw_sw_context *sw_context)
-{
-       struct vmw_validate_buffer *entry, *next;
-       struct vmw_resource_val_node *val;
-
-       /*
-        * Drop references to DMA buffers held during command submission.
-        */
-       list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-                                base.head) {
-               list_del(&entry->base.head);
-               ttm_bo_unref(&entry->base.bo);
-               (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
-               sw_context->cur_val_buf--;
-       }
-       BUG_ON(sw_context->cur_val_buf != 0);
-
-       list_for_each_entry(val, &sw_context->resource_list, head)
-               (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
-}
-
-int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-                              struct ttm_buffer_object *bo,
-                              bool interruptible,
-                              bool validate_as_mob)
-{
-       struct vmw_buffer_object *vbo =
-               container_of(bo, struct vmw_buffer_object, base);
-       struct ttm_operation_ctx ctx = { interruptible, false };
-       int ret;
-
-       if (vbo->pin_count > 0)
-               return 0;
-
-       if (validate_as_mob)
-               return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
-       /**
-        * Put BO in VRAM if there is space, otherwise as a GMR.
-        * If there is no space in VRAM and GMR ids are all used up,
-        * start evicting GMRs to make room. If the DMA buffer can't be
-        * used as a GMR, this will return -ENOMEM.
-        */
-
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
-       if (likely(ret == 0 || ret == -ERESTARTSYS))
-               return ret;
-
-       /**
-        * If that failed, try VRAM again, this time evicting
-        * previous contents.
-        */
-
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-       return ret;
-}
-
-static int vmw_validate_buffers(struct vmw_private *dev_priv,
-                               struct vmw_sw_context *sw_context)
-{
-       struct vmw_validate_buffer *entry;
-       int ret;
-
-       list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-               ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
-                                                true,
-                                                entry->validate_as_mob);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-       return 0;
-}
-
 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
                                 uint32_t size)
 {
@@ -3946,7 +3675,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
 
        if (sw_context->dx_ctx_node)
                cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
-                                         sw_context->dx_ctx_node->res->id);
+                                         sw_context->dx_ctx_node->ctx->id);
        else
                cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (!cmd) {
@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
                                     u32 command_size,
                                     struct vmw_sw_context *sw_context)
 {
-       u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+       u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
                  SVGA3D_INVALID_ID);
        void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
                                       id, false, header);
@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   uint32_t handle)
 {
-       struct vmw_resource_val_node *ctx_node;
        struct vmw_resource *res;
        int ret;
+       unsigned int size;
 
        if (handle == SVGA3D_INVALID_ID)
                return 0;
 
-       ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
-                                             handle, user_context_converter,
-                                             &res);
-       if (unlikely(ret != 0)) {
+       size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
+       ret = vmw_validation_preload_res(sw_context->ctx, size);
+       if (ret)
+               return ret;
+
+       res = vmw_user_resource_noref_lookup_handle
+               (dev_priv, sw_context->fp->tfile, handle,
+                user_context_converter);
+       if (unlikely(IS_ERR(res))) {
                DRM_ERROR("Could not find or user DX context 0x%08x.\n",
                          (unsigned) handle);
-               return ret;
+               return PTR_ERR(res);
        }
 
-       ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+       ret = vmw_execbuf_res_noref_val_add(sw_context, res);
        if (unlikely(ret != 0))
-               goto out_err;
+               return ret;
 
-       sw_context->dx_ctx_node = ctx_node;
+       sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
        sw_context->man = vmw_context_res_man(res);
-out_err:
-       vmw_resource_unreference(&res);
-       return ret;
+
+       return 0;
 }
 
 int vmw_execbuf_process(struct drm_file *file_priv,
@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_fence_obj *fence = NULL;
-       struct vmw_resource *error_resource;
-       struct list_head resource_list;
        struct vmw_cmdbuf_header *header;
-       struct ww_acquire_ctx ticket;
        uint32_t handle;
        int ret;
        int32_t out_fence_fd = -1;
        struct sync_file *sync_file = NULL;
-
+       DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                sw_context->kernel = true;
 
        sw_context->fp = vmw_fpriv(file_priv);
-       sw_context->cur_reloc = 0;
-       sw_context->cur_val_buf = 0;
-       INIT_LIST_HEAD(&sw_context->resource_list);
-       INIT_LIST_HEAD(&sw_context->ctx_resource_list);
+       INIT_LIST_HEAD(&sw_context->ctx_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
        sw_context->last_query_ctx = NULL;
        sw_context->needs_post_query_barrier = false;
@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        sw_context->dx_query_mob = NULL;
        sw_context->dx_query_ctx = NULL;
        memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
-       INIT_LIST_HEAD(&sw_context->validate_nodes);
        INIT_LIST_HEAD(&sw_context->res_relocations);
+       INIT_LIST_HEAD(&sw_context->bo_relocations);
        if (sw_context->staged_bindings)
                vmw_binding_state_reset(sw_context->staged_bindings);
 
@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                sw_context->res_ht_initialized = true;
        }
        INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-       INIT_LIST_HEAD(&resource_list);
+       sw_context->ctx = &val_ctx;
        ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
-       if (unlikely(ret != 0)) {
-               list_splice_init(&sw_context->ctx_resource_list,
-                                &sw_context->resource_list);
+       if (unlikely(ret != 0))
                goto out_err_nores;
-       }
 
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
-       /*
-        * Merge the resource lists before checking the return status
-        * from vmd_cmd_check_all so that all the open hashtabs will
-        * be handled properly even if vmw_cmd_check_all fails.
-        */
-       list_splice_init(&sw_context->ctx_resource_list,
-                        &sw_context->resource_list);
-
        if (unlikely(ret != 0))
                goto out_err_nores;
 
@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err_nores;
 
-       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
-                                    true, NULL);
+       ret = vmw_validation_bo_reserve(&val_ctx, true);
        if (unlikely(ret != 0))
                goto out_err_nores;
 
-       ret = vmw_validate_buffers(dev_priv, sw_context);
+       ret = vmw_validation_bo_validate(&val_ctx, true);
        if (unlikely(ret != 0))
                goto out_err;
 
-       ret = vmw_resources_validate(sw_context);
+       ret = vmw_validation_res_validate(&val_ctx, true);
        if (unlikely(ret != 0))
                goto out_err;
+       vmw_validation_drop_ht(&val_ctx);
 
        ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
        if (unlikely(ret != 0)) {
@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");
 
-       vmw_resources_unreserve(sw_context, false);
+       vmw_execbuf_bindings_commit(sw_context, false);
+       vmw_bind_dx_query_mob(sw_context);
+       vmw_validation_res_unreserve(&val_ctx, false);
 
-       ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
-                                   (void *) fence);
+       vmw_validation_bo_fence(sw_context->ctx, fence);
 
        if (unlikely(dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid))
                __vmw_execbuf_release_pinned_bo(dev_priv, fence);
 
-       vmw_clear_validations(sw_context);
-
        /*
         * If anything fails here, give up trying to export the fence
         * and do a sync since the user mode will not be able to sync
@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                vmw_fence_obj_unreference(&fence);
        }
 
-       list_splice_init(&sw_context->resource_list, &resource_list);
        vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 
@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
-       vmw_resource_list_unreference(sw_context, &resource_list);
+       vmw_validation_unref_lists(&val_ctx);
 
        return 0;
 
 out_unlock_binding:
        mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-       ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+       vmw_validation_bo_backoff(&val_ctx);
 out_err_nores:
-       vmw_resources_unreserve(sw_context, true);
+       vmw_execbuf_bindings_commit(sw_context, true);
+       vmw_validation_res_unreserve(&val_ctx, true);
        vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
-       vmw_clear_validations(sw_context);
        if (unlikely(dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid))
                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
-       list_splice_init(&sw_context->resource_list, &resource_list);
-       error_resource = sw_context->error_resource;
-       sw_context->error_resource = NULL;
        vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+       vmw_validation_drop_ht(&val_ctx);
+       WARN_ON(!list_empty(&sw_context->ctx_list));
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 
        /*
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
-       vmw_resource_list_unreference(sw_context, &resource_list);
-       if (unlikely(error_resource != NULL))
-               vmw_resource_unreference(&error_resource);
+       vmw_validation_unref_lists(&val_ctx);
 out_free_header:
        if (header)
                vmw_cmdbuf_header_free(header);
@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                     struct vmw_fence_obj *fence)
 {
        int ret = 0;
-       struct list_head validate_list;
-       struct ttm_validate_buffer pinned_val, query_val;
        struct vmw_fence_obj *lfence = NULL;
-       struct ww_acquire_ctx ticket;
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 
        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;
 
-       INIT_LIST_HEAD(&validate_list);
-
-       pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
-       pinned_val.shared = false;
-       list_add_tail(&pinned_val.head, &validate_list);
+       ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+                                   false);
+       if (ret)
+               goto out_no_reserve;
 
-       query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
-       query_val.shared = false;
-       list_add_tail(&query_val.head, &validate_list);
+       ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+                                   false);
+       if (ret)
+               goto out_no_reserve;
 
-       ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
-                                    false, NULL);
-       if (unlikely(ret != 0)) {
-               vmw_execbuf_unpin_panic(dev_priv);
+       ret = vmw_validation_bo_reserve(&val_ctx, false);
+       if (ret)
                goto out_no_reserve;
-       }
 
        if (dev_priv->query_cid_valid) {
                BUG_ON(fence != NULL);
                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-               if (unlikely(ret != 0)) {
-                       vmw_execbuf_unpin_panic(dev_priv);
+               if (ret)
                        goto out_no_emit;
-               }
                dev_priv->query_cid_valid = false;
        }
 
@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                                  NULL);
                fence = lfence;
        }
-       ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
+       vmw_validation_bo_fence(&val_ctx, fence);
        if (lfence != NULL)
                vmw_fence_obj_unreference(&lfence);
 
-       ttm_bo_unref(&query_val.bo);
-       ttm_bo_unref(&pinned_val.bo);
+       vmw_validation_unref_lists(&val_ctx);
        vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
        return;
 
 out_no_emit:
-       ttm_eu_backoff_reservation(&ticket, &validate_list);
+       vmw_validation_bo_backoff(&val_ctx);
 out_no_reserve:
-       ttm_bo_unref(&query_val.bo);
-       ttm_bo_unref(&pinned_val.bo);
+       vmw_validation_unref_lists(&val_ctx);
+       vmw_execbuf_unpin_panic(dev_priv);
        vmw_bo_unreference(&dev_priv->pinned_bo);
 }
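The function above is the smallest self-contained user of the new validation API and fixes its call order. A condensed sketch of that lifecycle for one buffer object (illustrative only; assumes the caller already holds a struct vmw_buffer_object *vbo reference and a struct vmw_fence_obj *fence):

	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	int ret;

	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
	if (ret)
		goto out_unref;
	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_unref;
	/* ... emit device commands referencing vbo ... */
	vmw_validation_bo_fence(&val_ctx, fence); /* fences and unreserves */
out_unref:
	vmw_validation_unref_lists(&val_ctx); /* drops refs, frees node memory */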
 
 /**
index 3d546d4093341fd6ba329c06830f173666e2f707..f87261545f2c1e8123846af64d406f32efcb4883 100644 (file)
@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
-       fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+       fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+               TTM_OBJ_EXTRA_SIZE;
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
        fman->event_fence_action_size =
                ttm_round_pot(sizeof(struct vmw_event_fence_action));
@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
        }
 
        *p_fence = &ufence->fence;
-       *p_handle = ufence->base.hash.key;
+       *p_handle = ufence->base.handle;
 
        return 0;
 out_err:
@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                                          "object.\n");
                                goto out_no_ref_obj;
                        }
-                       handle = base->hash.key;
+                       handle = base->handle;
                }
                ttm_base_object_unref(&base);
        }
index 292e48feba83bf7453d8907ff40613c89be5c5c0..dca04d4246ea8d826b2e31b6ce507b64c14f7bda 100644 (file)
@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
  */
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_buffer_object *buf,
-                                 bool interruptible,
-                                 bool validate_as_mob,
-                                 bool for_cpu_blit)
-{
-       struct ttm_operation_ctx ctx = {
-               .interruptible = interruptible,
-               .no_wait_gpu = false};
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-
-       ttm_bo_reserve(bo, false, false, NULL);
-       if (for_cpu_blit)
-               ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
-       else
-               ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
-                                                validate_as_mob);
-       if (ret)
-               ttm_bo_unreserve(bo);
-
-       return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
-       if (buf)
-               ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-                                 struct drm_file *file_priv,
-                                 struct vmw_buffer_object *buf,
-                                 struct vmw_fence_obj **out_fence,
-                                 struct drm_vmw_fence_rep __user *
-                                 user_fence_rep)
-{
-       struct vmw_fence_obj *fence;
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+                                     struct drm_file *file_priv,
+                                     struct vmw_validation_context *ctx,
+                                     struct vmw_fence_obj **out_fence,
+                                     struct drm_vmw_fence_rep __user *
+                                     user_fence_rep)
+{
+       struct vmw_fence_obj *fence = NULL;
        uint32_t handle;
        int ret;
 
-       ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
-                                        file_priv ? &handle : NULL);
-       if (buf)
-               vmw_bo_fence_single(&buf->base, fence);
+       if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+           out_fence)
+               ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+                                                file_priv ? &handle : NULL);
+       vmw_validation_done(ctx, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                *out_fence = fence;
        else
                vmw_fence_obj_unreference(&fence);
-
-       vmw_kms_helper_buffer_revert(buf);
-}
-
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
-       struct vmw_resource *res = ctx->res;
-
-       vmw_kms_helper_buffer_revert(ctx->buf);
-       vmw_bo_unreference(&ctx->buf);
-       vmw_resource_unreserve(res, false, NULL, 0);
-       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                   bool interruptible,
-                                   struct vmw_validation_ctx *ctx)
-{
-       int ret = 0;
-
-       ctx->buf = NULL;
-       ctx->res = res;
-
-       if (interruptible)
-               ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
-       else
-               mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
-       if (unlikely(ret != 0))
-               return -ERESTARTSYS;
-
-       ret = vmw_resource_reserve(res, interruptible, false);
-       if (ret)
-               goto out_unlock;
-
-       if (res->backup) {
-               ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
-                                                   interruptible,
-                                                   res->dev_priv->has_mob,
-                                                   false);
-               if (ret)
-                       goto out_unreserve;
-
-               ctx->buf = vmw_bo_reference(res->backup);
-       }
-       ret = vmw_resource_validate(res);
-       if (ret)
-               goto out_revert;
-       return 0;
-
-out_revert:
-       vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
-       vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
-       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-       return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-                                   struct vmw_fence_obj **out_fence)
-{
-       struct vmw_resource *res = ctx->res;
-
-       if (ctx->buf || out_fence)
-               vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
-                                            out_fence, NULL);
-
-       vmw_bo_unreference(&ctx->buf);
-       vmw_resource_unreserve(res, false, NULL, 0);
-       mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
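For KMS callers the helper above is the tail end of a fixed pattern; the screen-object and STDU hunks below all follow it. A minimal sketch (resource variant, error handling as in the patch):

	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
	if (ret)
		return ret;
	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
	if (ret)
		goto out_unref;
	/* ... build and submit the dirty commands ... */
	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
					 NULL);
	return ret;
out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;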
 
 /**
index 31311298ec0ba18f0c31b96f0338a6db40e06247..76ec570c0684f4c2e17dad2973912840c3b9281f 100644 (file)
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         int increment,
                         struct vmw_kms_dirty *dirty);
 
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_buffer_object *buf,
-                                 bool interruptible,
-                                 bool validate_as_mob,
-                                 bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-                                 struct drm_file *file_priv,
-                                 struct vmw_buffer_object *buf,
-                                 struct vmw_fence_obj **out_fence,
-                                 struct drm_vmw_fence_rep __user *
-                                 user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                   bool interruptible,
-                                   struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-                                   struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+                                     struct drm_file *file_priv,
+                                     struct vmw_validation_context *ctx,
+                                     struct vmw_fence_obj **out_fence,
+                                     struct drm_vmw_fence_rep __user *
+                                     user_fence_rep);
 int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
index 0861c821a7fe399540726ec93e54baab5d383152..e420675e8db3ceb19b669fdd1f517dd5f276e899 100644 (file)
@@ -31,8 +31,8 @@
  */
 
 #include "vmwgfx_drv.h"
+#include "ttm_object.h"
 #include <linux/dma-buf.h>
-#include <drm/ttm/ttm_object.h>
 
 /*
  * DMA-BUF attach- and mapping methods. No need to implement
index 92003ea5a2196a6cbcac38a9e9b4add15a4e6bcb..8a029bade32a8561e0cf12e86d1e6eb28a69b002 100644 (file)
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-       write_lock(&dev_priv->resource_lock);
-       res->avail = false;
+       spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;
 
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
        else
                kfree(res);
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
        BUG_ON(res->id != -1);
 
        idr_preload(GFP_KERNEL);
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
 
        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;
 
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
 }
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
-       res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                return vmw_resource_alloc_id(res);
 }
 
-/**
- * vmw_resource_activate
- *
- * @res:        Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
-                          void (*hw_destroy) (struct vmw_resource *))
-{
-       struct vmw_private *dev_priv = res->dev_priv;
-
-       write_lock(&dev_priv->resource_lock);
-       res->avail = true;
-       res->hw_destroy = hw_destroy;
-       write_unlock(&dev_priv->resource_lock);
-}
 
 /**
  * vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                goto out_bad_resource;
 
        res = converter->base_obj_to_res(base);
-
-       read_lock(&dev_priv->resource_lock);
-       if (!res->avail || res->res_free != converter->res_free) {
-               read_unlock(&dev_priv->resource_lock);
-               goto out_bad_resource;
-       }
-
        kref_get(&res->kref);
-       read_unlock(&dev_priv->resource_lock);
 
        *p_res = res;
        ret = 0;
@@ -262,6 +230,41 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
        return ret;
 }
 
+/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle without taking a reference, and perform basic
+ * type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ *
+ * Return: A non-refcounted pointer to the resource on success,
+ * ERR_PTR(-ESRCH) if the handle can't be found, or ERR_PTR(-EINVAL) if it
+ * is associated with an incorrect resource type.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+                                     struct ttm_object_file *tfile,
+                                     uint32_t handle,
+                                     const struct vmw_user_resource_conv
+                                     *converter)
+{
+       struct ttm_base_object *base;
+
+       base = ttm_base_object_noref_lookup(tfile, handle);
+       if (!base)
+               return ERR_PTR(-ESRCH);
+
+       if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+               ttm_base_object_noref_release();
+               return ERR_PTR(-EINVAL);
+       }
+
+       return converter->base_obj_to_res(base);
+}
+
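The pointer returned above is not refcounted, so callers must consume it before anything can drop the underlying object. The execbuf hunk earlier in this patch shows the intended pattern (a sketch, error handling trimmed):

	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
						    converter);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = vmw_execbuf_res_noref_val_add(sw_context, res);
	/* no vmw_resource_unreference(&res): there is no reference to drop */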
 /**
  * Helper function that looks up either a surface or a bo.
  *
@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 /**
@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 
        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
@@ -587,15 +590,18 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 /**
  * vmw_resource_validate - Make a resource up-to-date and visible
  *                         to the device.
- *
- * @res:            The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
  *
  * On successful return, any backup DMA buffer pointed to by @res->backup will
  * be reserved and validated.
  * On hardware resource shortage, this function will repeatedly evict
  * resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
  */
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
 {
        int ret;
        struct vmw_resource *evict_res;
@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
                if (likely(ret != -EBUSY))
                        break;
 
-               write_lock(&dev_priv->resource_lock);
+               spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        break;
                }
 
@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
                                          lru_head));
                list_del_init(&evict_res->lru_head);
 
-               write_unlock(&dev_priv->resource_lock);
+               spin_unlock(&dev_priv->resource_lock);
 
                /* Trylock backup buffers with a NULL ticket. */
-               ret = vmw_resource_do_evict(NULL, evict_res, true);
+               ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
-                       write_lock(&dev_priv->resource_lock);
+                       spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
        struct ww_acquire_ctx ticket;
 
        do {
-               write_lock(&dev_priv->resource_lock);
+               spin_lock(&dev_priv->resource_lock);
 
                if (list_empty(lru_list))
                        goto out_unlock;
@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
-               write_unlock(&dev_priv->resource_lock);
+               spin_unlock(&dev_priv->resource_lock);
 
                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
-                       write_lock(&dev_priv->resource_lock);
+                       spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
        } while (1);
 
 out_unlock:
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 /**
@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
-               ret = vmw_resource_validate(res);
+               ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
index a8c1c5ebd71d39a0f2235d08ecd3e552ff0fcf8f..7e19eba0b0b8cd930be900aa0a4354dc5ac6c714 100644 (file)
 
 #include "vmwgfx_drv.h"
 
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128
+ * bytes per id.
+ */
 #define VMW_IDA_ACC_SIZE 128
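As a worked example of the accounting this constant feeds into, the shader hunks later in this patch charge the TTM memory global per user object as:

	vmw_user_shader_size = ttm_round_pot(sizeof(struct vmw_user_shader)) +
			       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;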
 
 enum vmw_cmdbuf_res_state {
@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func);
-void vmw_resource_activate(struct vmw_resource *res,
-                          void (*hw_destroy) (struct vmw_resource *));
 int
 vmw_simple_resource_create_ioctl(struct drm_device *dev,
                                 void *data,
index ad0de7f0cd60f07006e58fe4a544b19b973ccb87..53316b1bda3d6de9744313c66ebf27eda5642826 100644 (file)
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
-       struct vmw_validation_ctx ctx;
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;
 
        if (!srf)
                srf = &vfbs->surface->res;
 
-       ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+       ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;
 
+       ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+       if (ret)
+               goto out_unref;
+
        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
-       vmw_kms_helper_resource_finish(&ctx, out_fence);
+       vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                        NULL);
 
        return ret;
+
+out_unref:
+       vmw_validation_unref_lists(&val_ctx);
+       return ret;
 }
 
 /**
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;
 
-       ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-                                           false, false);
+       ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;
 
+       ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+       if (ret)
+               goto out_unref;
+
        ret = do_bo_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
-       vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+       vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                        NULL);
 
        return ret;
 
 out_revert:
-       vmw_kms_helper_buffer_revert(buf);
+       vmw_validation_revert(&val_ctx);
+out_unref:
+       vmw_validation_unref_lists(&val_ctx);
 
        return ret;
 }
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
        struct vmw_buffer_object *buf =
                container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_kms_dirty dirty;
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;
 
-       ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
-                                           false);
+       ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;
 
+       ret = vmw_validation_prepare(&val_ctx, NULL, true);
+       if (ret)
+               goto out_unref;
+
        ret = do_bo_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                   0, 0, num_clips, 1, &dirty);
-       vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-                                    user_fence_rep);
+       vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+                                        user_fence_rep);
 
        return ret;
 
 out_revert:
-       vmw_kms_helper_buffer_revert(buf);
-
+       vmw_validation_revert(&val_ctx);
+out_unref:
+       vmw_validation_unref_lists(&val_ctx);
+       
        return ret;
 }
index fe4842ca3b6ed21d82d782861e54aa27afe4f1c1..bf32fe44621986758ca59982e69a896a77d85e2f 100644 (file)
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
        shader->num_input_sig = num_input_sig;
        shader->num_output_sig = num_output_sig;
 
-       vmw_resource_activate(res, vmw_hw_shader_destroy);
+       res->hw_destroy = vmw_hw_shader_destroy;
        return 0;
 }
 
@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
 {
        struct vmw_dx_shader *entry, *next;
 
-       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+       lockdep_assert_held_once(&dev_priv->binding_mutex);
 
        list_for_each_entry_safe(entry, next, list, cotable_head) {
                WARN_ON(vmw_dx_shader_scrub(&entry->res));
@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
 
        res = &shader->res;
        shader->ctx = ctx;
-       shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+       shader->cotable = vmw_resource_reference
+               (vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
        shader->id = user_key;
        shader->committed = false;
        INIT_LIST_HEAD(&shader->cotable_head);
@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
                goto out_resource_init;
 
        res->id = shader->id;
-       vmw_resource_activate(res, vmw_hw_shader_destroy);
+       res->hw_destroy = vmw_hw_shader_destroy;
 
 out_resource_init:
        vmw_resource_unreference(&res);
@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
        };
        int ret;
 
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of shaders anyway.
-        */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
-                       ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+                       ttm_round_pot(sizeof(struct vmw_user_shader)) +
+                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
        }
 
        if (handle)
-               *handle = ushader->base.hash.key;
+               *handle = ushader->base.handle;
 out_err:
        vmw_resource_unreference(&res);
 out:
@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
        };
        int ret;
 
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of shaders anyway.
-        */
        if (unlikely(vmw_shader_size == 0))
                vmw_shader_size =
-                       ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+                       ttm_round_pot(sizeof(struct vmw_shader)) +
+                       VMW_IDA_ACC_SIZE;
 
        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_shader_size,
index 6ebc5affde1476587636d5312514fa32d3e5f3dd..6a6865384e91f21fba1890f69b3889771e683bea 100644 (file)
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
                return ret;
        }
 
-       vmw_resource_activate(&simple->res, simple->func->hw_destroy);
+       simple->res.hw_destroy = simple->func->hw_destroy;
 
        return 0;
 }
@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
 
        alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
          func->size;
-       account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+       account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+               TTM_OBJ_EXTRA_SIZE;
 
        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (ret)
@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
                goto out_err;
        }
 
-       func->set_arg_handle(data, usimple->base.hash.key);
+       func->set_arg_handle(data, usimple->base.handle);
 out_err:
        vmw_resource_unreference(&res);
 out_ret:
index e9b6b7baa00946f35cb0a6eb2df09ce75c82a6a1..bc8bb690f1ea03a09b4cdbcafe4ed6b9b9a6f2b8 100644 (file)
@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
                union vmw_view_destroy body;
        } *cmd;
 
-       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+       lockdep_assert_held_once(&dev_priv->binding_mutex);
        vmw_binding_res_list_scrub(&res->binding_head);
 
        if (!view->committed || res->id == -1)
@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
        res = &view->res;
        view->ctx = ctx;
        view->srf = vmw_resource_reference(srf);
-       view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+       view->cotable = vmw_resource_reference
+               (vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
        view->view_type = view_type;
        view->view_id = user_key;
        view->cmd_size = cmd_size;
@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
                goto out_resource_init;
 
        res->id = view->view_id;
-       vmw_resource_activate(res, vmw_hw_view_destroy);
+       res->hw_destroy = vmw_hw_view_destroy;
 
 out_resource_init:
        vmw_resource_unreference(&res);
@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
 {
        struct vmw_view *entry, *next;
 
-       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+       lockdep_assert_held_once(&dev_priv->binding_mutex);
 
        list_for_each_entry_safe(entry, next, list, cotable_head)
                WARN_ON(vmw_view_destroy(&entry->res));
@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
 {
        struct vmw_view *entry, *next;
 
-       WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+       lockdep_assert_held_once(&dev_priv->binding_mutex);
 
        list_for_each_entry_safe(entry, next, list, srf_head)
                WARN_ON(vmw_view_destroy(&entry->res));
index f30e839f7bfd2d946bbe908e20211719646ff8b5..e086565c1da6d80f2a924947230ccd14ca7175fc 100644 (file)
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
        struct vmw_stdu_dirty ddirty;
        int ret;
        bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 
        /*
         * VMs without 3D support don't have the surface DMA command and
         * we'll be using a CPU blit, and the framebuffer should be moved out
         * of VRAM.
         */
-       ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-                                           false, cpu_blit);
+       ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
        if (ret)
                return ret;
 
+       ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+       if (ret)
+               goto out_unref;
+
        ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
        ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
                                   0, 0, num_clips, increment, &ddirty.base);
-       vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-                                    user_fence_rep);
 
+       vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+                                        user_fence_rep);
+       return ret;
+
+out_unref:
+       vmw_validation_unref_lists(&val_ctx);
        return ret;
 }
 
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_stdu_dirty sdirty;
-       struct vmw_validation_ctx ctx;
+       DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;
 
        if (!srf)
                srf = &vfbs->surface->res;
 
-       ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+       ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;
 
+       ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+       if (ret)
+               goto out_unref;
+
        if (vfbs->is_bo_proxy) {
                ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                if (ret)
@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
 out_finish:
-       vmw_kms_helper_resource_finish(&ctx, out_fence);
+       vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                        NULL);
+
+       return ret;
 
+out_unref:
+       vmw_validation_unref_lists(&val_ctx);
        return ret;
 }
 
index 80a01cd4c051338654b629f3205b32259dd53ba3..ef09f7edf931b9825fb993745daaa01e07588a99 100644 (file)
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
         */
 
        INIT_LIST_HEAD(&srf->view_list);
-       vmw_resource_activate(res, vmw_hw_surface_destroy);
+       res->hw_destroy = vmw_hw_surface_destroy;
        return ret;
 }
 
@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       128;
+                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
            num_sizes == 0)
                return -EINVAL;
 
-       size = vmw_user_surface_size + 128 +
+       size = vmw_user_surface_size +
                ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
                ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
 
@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
-       rep->sid = user_srf->prime.base.hash.key;
+       rep->sid = user_srf->prime.base.handle;
        vmw_resource_unreference(&res);
 
        ttm_read_unlock(&dev_priv->reservation_sem);
@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
-               ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+               ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
                ret = -EFAULT;
        }
 
@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       128;
+                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
-       size = vmw_user_surface_size + 128;
+       size = vmw_user_surface_size;
 
        /* Define a surface based on the parameters. */
        ret = vmw_surface_gb_priv_define(dev,
@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                goto out_unlock;
        }
 
-       rep->handle      = user_srf->prime.base.hash.key;
+       rep->handle      = user_srf->prime.base.handle;
        rep->backup_size = res->backup_size;
        if (res->backup) {
                rep->buffer_map_handle =
@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a GB surface "
                          "backup buffer.\n");
-               (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+               (void) ttm_ref_object_base_unref(tfile, base->handle,
                                                 TTM_REF_USAGE);
                goto out_bad_resource;
        }
@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
        rep->creq.base.array_size = srf->array_size;
        rep->creq.base.buffer_handle = backup_handle;
        rep->creq.base.base_size = srf->base_size;
-       rep->crep.handle = user_srf->prime.base.hash.key;
+       rep->crep.handle = user_srf->prime.base.handle;
        rep->crep.backup_size = srf->res.backup_size;
        rep->crep.buffer_handle = backup_handle;
        rep->crep.buffer_map_handle =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
new file mode 100644 (file)
index 0000000..184025f
--- /dev/null
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/slab.h>
+#include "vmwgfx_validation.h"
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_validation_bo_node - Buffer object validation metadata.
+ * @base: Metadata used for TTM reservation and validation.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @as_mob: Validate as mob.
+ * @cpu_blit: Validate for cpu blit access.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_bo_node {
+       struct ttm_validate_buffer base;
+       struct drm_hash_item hash;
+       u32 as_mob : 1;
+       u32 cpu_blit : 1;
+};
+
+/**
+ * struct vmw_validation_res_node - Resource validation metadata.
+ * @head: List head for the resource validation list.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @res: Reference counted resource pointer.
+ * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
+ * to a resource.
+ * @new_backup_offset: Offset into the new backup mob for resources that can
+ * share MOBs.
+ * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
+ * the command stream provides a mob bind operation.
+ * @switching_backup: The validation process is switching backup MOB.
+ * @first_usage: True iff the resource has been seen only once in the current
+ * validation batch.
+ * @reserved: Whether the resource is currently reserved by this process.
+ * @private: Optionally additional memory for caller-private data.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_res_node {
+       struct list_head head;
+       struct drm_hash_item hash;
+       struct vmw_resource *res;
+       struct vmw_buffer_object *new_backup;
+       unsigned long new_backup_offset;
+       u32 no_buffer_needed : 1;
+       u32 switching_backup : 1;
+       u32 first_usage : 1;
+       u32 reserved : 1;
+       unsigned long private[0];
+};
+
+/**
+ * vmw_validation_mem_alloc - Allocate kernel memory from the validation
+ * context based allocator
+ * @ctx: The validation context
+ * @size: The number of bytes to allocate.
+ *
+ * The memory allocated may not exceed PAGE_SIZE, and the returned
+ * address is aligned to sizeof(long). All memory allocated this way is
+ * reclaimed after validation when calling any of the exported functions:
+ * vmw_validation_unref_lists()
+ * vmw_validation_revert()
+ * vmw_validation_done()
+ *
+ * Return: Pointer to the allocated memory on success. NULL on failure.
+ */
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+                              unsigned int size)
+{
+       void *addr;
+
+       size = vmw_validation_align(size);
+       if (size > PAGE_SIZE)
+               return NULL;
+
+       if (ctx->mem_size_left < size) {
+               struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+               if (!page)
+                       return NULL;
+
+               list_add_tail(&page->lru, &ctx->page_list);
+               ctx->page_address = page_address(page);
+               ctx->mem_size_left = PAGE_SIZE;
+       }
+
+       addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
+       ctx->mem_size_left -= size;
+
+       return addr;
+}
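A usage sketch under the constraints documented above: metadata nodes are bump-allocated from per-context pages and are never freed individually, so callers simply drop them and reclaim everything via the context:

	struct vmw_validation_bo_node *node;

	node = vmw_validation_mem_alloc(ctx, sizeof(*node));
	if (!node)
		return -ENOMEM;
	/* ... fill in and link node; no kfree() ... */
	/* vmw_validation_unref_lists(ctx) later frees the backing pages */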
+
+/**
+ * vmw_validation_mem_free - Free all memory allocated using
+ * vmw_validation_mem_alloc()
+ * @ctx: The validation context
+ *
+ * All memory previously allocated for this context using
+ * vmw_validation_mem_alloc() is freed.
+ */
+static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
+{
+       struct page *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
+               list_del_init(&entry->lru);
+               __free_page(entry);
+       }
+
+       ctx->mem_size_left = 0;
+}
+
+/**
+ * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @vbo: The buffer object to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_bo_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_bo_node *
+vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+                          struct vmw_buffer_object *vbo)
+{
+       struct  vmw_validation_bo_node *bo_node = NULL;
+
+       if (!ctx->merge_dups)
+               return NULL;
+
+       if (ctx->ht) {
+               struct drm_hash_item *hash;
+
+               if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+                       bo_node = container_of(hash, typeof(*bo_node), hash);
+       } else {
+               struct  vmw_validation_bo_node *entry;
+
+               list_for_each_entry(entry, &ctx->bo_list, base.head) {
+                       if (entry->base.bo == &vbo->base) {
+                               bo_node = entry;
+                               break;
+                       }
+               }
+       }
+
+       return bo_node;
+}
+
+/**
+ * vmw_validation_find_res_dup - Find a duplicate resource entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @res: The resource to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_res_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_res_node *
+vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+                           struct vmw_resource *res)
+{
+       struct  vmw_validation_res_node *res_node = NULL;
+
+       if (!ctx->merge_dups)
+               return NULL;
+
+       if (ctx->ht) {
+               struct drm_hash_item *hash;
+
+               if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+                       res_node = container_of(hash, typeof(*res_node), hash);
+       } else {
+               struct  vmw_validation_res_node *entry;
+
+               list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
+                       if (entry->res == res) {
+                               res_node = entry;
+                               goto out;
+                       }
+               }
+
+               list_for_each_entry(entry, &ctx->resource_list, head) {
+                       if (entry->res == res) {
+                               res_node = entry;
+                               break;
+                       }
+               }
+
+       }
+out:
+       return res_node;
+}
+
+/**
+ * vmw_validation_add_bo - Add a buffer object to the validation context.
+ * @ctx: The validation context.
+ * @vbo: The buffer object.
+ * @as_mob: Validate as MOB, otherwise suitable for GMR operations.
+ * @cpu_blit: Validate in a page-mappable location.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+                         struct vmw_buffer_object *vbo,
+                         bool as_mob,
+                         bool cpu_blit)
+{
+       struct vmw_validation_bo_node *bo_node;
+
+       bo_node = vmw_validation_find_bo_dup(ctx, vbo);
+       if (bo_node) {
+               if (bo_node->as_mob != as_mob ||
+                   bo_node->cpu_blit != cpu_blit) {
+                       DRM_ERROR("Inconsistent buffer usage.\n");
+                       return -EINVAL;
+               }
+       } else {
+               struct ttm_validate_buffer *val_buf;
+               int ret;
+
+               bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+               if (!bo_node)
+                       return -ENOMEM;
+
+               if (ctx->ht) {
+                       bo_node->hash.key = (unsigned long) vbo;
+                       ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+                       if (ret) {
+                               DRM_ERROR("Failed to initialize a buffer "
+                                         "validation entry.\n");
+                               return ret;
+                       }
+               }
+               val_buf = &bo_node->base;
+               val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+               if (!val_buf->bo)
+                       return -ESRCH;
+               val_buf->shared = false;
+               list_add_tail(&val_buf->head, &ctx->bo_list);
+               bo_node->as_mob = as_mob;
+               bo_node->cpu_blit = cpu_blit;
+       }
+
+       return 0;
+}
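
A hedged usage sketch: with duplicate merging enabled on the context,
re-adding the same buffer object with identical flags simply merges into the
existing entry, while conflicting flags are rejected (ctx and vbo are assumed
to come from the surrounding command-submission path):

        int ret;

        ret = vmw_validation_add_bo(ctx, vbo, true, false);  /* as MOB */
        if (ret)
                return ret;

        /* Same flags: merged with the existing entry, returns 0. */
        ret = vmw_validation_add_bo(ctx, vbo, true, false);

        /* Conflicting usage of the same BO: fails with -EINVAL. */
        ret = vmw_validation_add_bo(ctx, vbo, false, true);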
+
+/**
+ * vmw_validation_add_resource - Add a resource to the validation context.
+ * @ctx: The validation context.
+ * @res: The resource.
+ * @priv_size: Size of private, additional metadata.
+ * @p_node: Output pointer of additional metadata address.
+ * @first_usage: Whether this was the first time this resource was seen.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+                               struct vmw_resource *res,
+                               size_t priv_size,
+                               void **p_node,
+                               bool *first_usage)
+{
+       struct vmw_validation_res_node *node;
+       int ret;
+
+       node = vmw_validation_find_res_dup(ctx, res);
+       if (node) {
+               node->first_usage = 0;
+               goto out_fill;
+       }
+
+       node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
+       if (!node) {
+               DRM_ERROR("Failed to allocate a resource validation "
+                         "entry.\n");
+               return -ENOMEM;
+       }
+
+       if (ctx->ht) {
+               node->hash.key = (unsigned long) res;
+               ret = drm_ht_insert_item(ctx->ht, &node->hash);
+               if (ret) {
+                       DRM_ERROR("Failed to initialize a resource validation "
+                                 "entry.\n");
+                       return ret;
+               }
+       }
+       node->res = vmw_resource_reference_unless_doomed(res);
+       if (!node->res)
+               return -ESRCH;
+
+       node->first_usage = 1;
+       if (!res->dev_priv->has_mob) {
+               list_add_tail(&node->head, &ctx->resource_list);
+       } else {
+               switch (vmw_res_type(res)) {
+               case vmw_res_context:
+               case vmw_res_dx_context:
+                       list_add(&node->head, &ctx->resource_ctx_list);
+                       break;
+               case vmw_res_cotable:
+                       list_add_tail(&node->head, &ctx->resource_ctx_list);
+                       break;
+               default:
+                       list_add_tail(&node->head, &ctx->resource_list);
+                       break;
+               }
+       }
+
+out_fill:
+       if (first_usage)
+               *first_usage = node->first_usage;
+       if (p_node)
+               *p_node = &node->private;
+
+       return 0;
+}
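
Because @priv_size bytes of caller metadata are co-allocated directly behind
the node, per-resource state needs no separate allocation. A minimal sketch
with a hypothetical metadata struct:

        struct my_res_meta {            /* hypothetical caller-private data */
                u32 dirty;
        };
        struct my_res_meta *meta;
        bool first;
        int ret;

        ret = vmw_validation_add_resource(ctx, res, sizeof(*meta),
                                          (void **)&meta, &first);
        if (ret)
                return ret;
        if (first)
                meta->dirty = 0;        /* initialize only on first sighting */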
+
+/**
+ * vmw_validation_res_switch_backup - Register a backup MOB switch during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @vbo: The new backup buffer object MOB. This buffer object needs to have
+ * already been registered with the validation context.
+ * @backup_offset: Offset into the new backup MOB.
+ */
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+                                     void *val_private,
+                                     struct vmw_buffer_object *vbo,
+                                     unsigned long backup_offset)
+{
+       struct vmw_validation_res_node *val;
+
+       val = container_of(val_private, typeof(*val), private);
+
+       val->switching_backup = 1;
+       if (val->first_usage)
+               val->no_buffer_needed = 1;
+
+       val->new_backup = vbo;
+       val->new_backup_offset = backup_offset;
+}
+
+/**
+ * vmw_validation_res_reserve - Reserve all resources registered with this
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Use interruptible waits when possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+                              bool intr)
+{
+       struct vmw_validation_res_node *val;
+       int ret = 0;
+
+       list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+       list_for_each_entry(val, &ctx->resource_list, head) {
+               struct vmw_resource *res = val->res;
+
+               ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
+               if (ret)
+                       goto out_unreserve;
+
+               val->reserved = 1;
+               if (res->backup) {
+                       struct vmw_buffer_object *vbo = res->backup;
+
+                       ret = vmw_validation_add_bo
+                               (ctx, vbo, vmw_resource_needs_backup(res),
+                                false);
+                       if (ret)
+                               goto out_unreserve;
+               }
+       }
+
+       return 0;
+
+out_unreserve:
+       vmw_validation_res_unreserve(ctx, true);
+       return ret;
+}
+
+/**
+ * vmw_validation_res_unreserve - Unreserve all reserved resources
+ * registered with this validation context.
+ * @ctx: The validation context.
+ * @backoff: Whether this is a backoff- or a commit-type operation. This
+ * is used to determine whether to switch backup MOBs or not.
+ */
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+                                bool backoff)
+{
+       struct vmw_validation_res_node *val;
+
+       list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+       list_for_each_entry(val, &ctx->resource_list, head) {
+               if (val->reserved)
+                       vmw_resource_unreserve(val->res,
+                                              !backoff &&
+                                              val->switching_backup,
+                                              val->new_backup,
+                                              val->new_backup_offset);
+       }
+}
+
+/**
+ * vmw_validation_bo_validate_single - Validate a single buffer object.
+ * @bo: The TTM buffer object base.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether to validate in MOB memory.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+                                     bool interruptible,
+                                     bool validate_as_mob)
+{
+       struct vmw_buffer_object *vbo =
+               container_of(bo, struct vmw_buffer_object, base);
+       struct ttm_operation_ctx ctx = {
+               .interruptible = interruptible,
+               .no_wait_gpu = false
+       };
+       int ret;
+
+       if (vbo->pin_count > 0)
+               return 0;
+
+       if (validate_as_mob)
+               return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+
+       /*
+        * Put BO in VRAM if there is space, otherwise as a GMR.
+        * If there is no space in VRAM and GMR ids are all used up,
+        * start evicting GMRs to make room. If the DMA buffer can't be
+        * used as a GMR, this will return -ENOMEM.
+        */
+
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+       if (ret == 0 || ret == -ERESTARTSYS)
+               return ret;
+
+       /*
+        * If that failed, try VRAM again, this time evicting
+        * previous contents.
+        */
+
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+       return ret;
+}
+
+/**
+ * vmw_validation_bo_validate - Validate all buffer objects registered with
+ * the validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
+{
+       struct vmw_validation_bo_node *entry;
+       int ret;
+
+       list_for_each_entry(entry, &ctx->bo_list, base.head) {
+               if (entry->cpu_blit) {
+                       struct ttm_operation_ctx ctx = {
+                               .interruptible = intr,
+                               .no_wait_gpu = false
+                       };
+
+                       ret = ttm_bo_validate(entry->base.bo,
+                                             &vmw_nonfixed_placement, &ctx);
+               } else {
+                       ret = vmw_validation_bo_validate_single
+                       (entry->base.bo, intr, entry->as_mob);
+               }
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+/**
+ * vmw_validation_res_validate - Validate all resources registered with the
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
+{
+       struct vmw_validation_res_node *val;
+       int ret;
+
+       list_for_each_entry(val, &ctx->resource_list, head) {
+               struct vmw_resource *res = val->res;
+               struct vmw_buffer_object *backup = res->backup;
+
+               ret = vmw_resource_validate(res, intr);
+               if (ret) {
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to validate resource.\n");
+                       return ret;
+               }
+
+               /* Check if the resource switched backup buffer */
+               if (backup && res->backup && (backup != res->backup)) {
+                       struct vmw_buffer_object *vbo = res->backup;
+
+                       ret = vmw_validation_add_bo
+                               (ctx, vbo, vmw_resource_needs_backup(res),
+                                false);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
+/**
+ * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
+ * and unregister it from this validation context.
+ * @ctx: The validation context.
+ *
+ * The hash table used for duplicate finding is an expensive resource and
+ * may be protected by mutexes that may cause deadlocks during resource
+ * unreferencing if held. After resource- and buffer object registering,
+ * there is no longer any use for this hash table, so allow freeing it
+ * either to shorten any mutex locking time, or before resource- and
+ * buffer objects are freed during validation context cleanup.
+ */
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+{
+       struct vmw_validation_bo_node *entry;
+       struct vmw_validation_res_node *val;
+
+       if (!ctx->ht)
+               return;
+
+       list_for_each_entry(entry, &ctx->bo_list, base.head)
+               (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+
+       list_for_each_entry(val, &ctx->resource_list, head)
+               (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+       list_for_each_entry(val, &ctx->resource_ctx_list, head)
+               (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+       ctx->ht = NULL;
+}
+
+/**
+ * vmw_validation_unref_lists - Unregister previously registered buffer
+ * objects and resources.
+ * @ctx: The validation context.
+ *
+ * Note that this function may cause buffer object- and resource destructors
+ * to be invoked.
+ */
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
+{
+       struct vmw_validation_bo_node *entry;
+       struct vmw_validation_res_node *val;
+
+       list_for_each_entry(entry, &ctx->bo_list, base.head)
+               ttm_bo_unref(&entry->base.bo);
+
+       list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+       list_for_each_entry(val, &ctx->resource_list, head)
+               vmw_resource_unreference(&val->res);
+
+       /*
+        * No need to detach each list entry since they are all freed with
+        * vmw_validation_mem_free. Just make them inaccessible.
+        */
+       INIT_LIST_HEAD(&ctx->bo_list);
+       INIT_LIST_HEAD(&ctx->resource_list);
+
+       vmw_validation_mem_free(ctx);
+}
+
+/**
+ * vmw_validation_prepare - Prepare a validation context for command
+ * submission.
+ * @ctx: The validation context.
+ * @mutex: The mutex used to protect resource reservation.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Note that the single reservation mutex @mutex is an unfortunate
+ * construct. Ideally resource reservation should be moved to per-resource
+ * ww_mutexes.
+ * If this function doesn't return zero to indicate success, all resources
+ * are left unreserved but still referenced.
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on error.
+ */
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+                          struct mutex *mutex,
+                          bool intr)
+{
+       int ret = 0;
+
+       if (mutex) {
+               if (intr)
+                       ret = mutex_lock_interruptible(mutex);
+               else
+                       mutex_lock(mutex);
+               if (ret)
+                       return -ERESTARTSYS;
+       }
+
+       ctx->res_mutex = mutex;
+       ret = vmw_validation_res_reserve(ctx, intr);
+       if (ret)
+               goto out_no_res_reserve;
+
+       ret = vmw_validation_bo_reserve(ctx, intr);
+       if (ret)
+               goto out_no_bo_reserve;
+
+       ret = vmw_validation_bo_validate(ctx, intr);
+       if (ret)
+               goto out_no_validate;
+
+       ret = vmw_validation_res_validate(ctx, intr);
+       if (ret)
+               goto out_no_validate;
+
+       return 0;
+
+out_no_validate:
+       vmw_validation_bo_backoff(ctx);
+out_no_bo_reserve:
+       vmw_validation_res_unreserve(ctx, true);
+out_no_res_reserve:
+       if (mutex)
+               mutex_unlock(mutex);
+
+       return ret;
+}
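
A hedged end-to-end sketch of the lifecycle this function anchors: register
objects, prepare, submit, then commit with vmw_validation_done() or back off
with vmw_validation_revert(). Here res_mutex, submit_commands() and fence are
placeholders for the caller's own locking, submission and fencing:

        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        /* ... vmw_validation_add_bo() / vmw_validation_add_resource() ... */

        ret = vmw_validation_prepare(&val_ctx, res_mutex, true);
        if (ret) {
                /* Everything is unreserved but still referenced. */
                vmw_validation_unref_lists(&val_ctx);
                return ret;
        }

        ret = submit_commands();                 /* hypothetical */
        if (ret) {
                vmw_validation_revert(&val_ctx); /* also unrefs the lists */
                return ret;
        }

        vmw_validation_done(&val_ctx, fence);    /* also unrefs the lists */
        return 0;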
+
+/**
+ * vmw_validation_revert - Revert validation actions if command submission
+ * failed.
+ *
+ * @ctx: The validation context.
+ *
+ * The caller still needs to unref resources after a call to this function.
+ */
+void vmw_validation_revert(struct vmw_validation_context *ctx)
+{
+       vmw_validation_bo_backoff(ctx);
+       vmw_validation_res_unreserve(ctx, true);
+       if (ctx->res_mutex)
+               mutex_unlock(ctx->res_mutex);
+       vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_done - Commit validation actions after command submission
+ * success.
+ * @ctx: The validation context.
+ * @fence: Fence with which to fence all buffer objects taking part in the
+ * command submission.
+ *
+ * The caller does NOT need to unref resources after a call to this function.
+ */
+void vmw_validation_done(struct vmw_validation_context *ctx,
+                        struct vmw_fence_obj *fence)
+{
+       vmw_validation_bo_fence(ctx, fence);
+       vmw_validation_res_unreserve(ctx, false);
+       if (ctx->res_mutex)
+               mutex_unlock(ctx->res_mutex);
+       vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_preload_bo - Preload the validation memory allocator for a
+ * call to vmw_validation_add_bo().
+ * @ctx: Pointer to the validation context.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
+ * but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
+{
+       unsigned int size = sizeof(struct vmw_validation_bo_node);
+
+       if (!vmw_validation_mem_alloc(ctx, size))
+               return -ENOMEM;
+
+       ctx->mem_size_left += size;
+       return 0;
+}
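
A hedged sketch of what the guarantee buys: do the possibly-sleeping page
allocation up front, then register the buffer object from a context that must
not sleep (the spinlock is hypothetical):

        ret = vmw_validation_preload_bo(ctx);
        if (ret)
                return ret;

        spin_lock(&my_lock);                     /* hypothetical lock */
        ret = vmw_validation_add_bo(ctx, vbo, false, false); /* won't sleep */
        spin_unlock(&my_lock);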
+
+/**
+ * vmw_validation_preload_res - Preload the validation memory allocator for a
+ * call to vmw_validation_add_resource().
+ * @ctx: Pointer to the validation context.
+ * @size: Size of the validation node extra data. See below.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_resource() with the same or smaller @size is
+ * guaranteed not to sleep. An error is not fatal but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+                              unsigned int size)
+{
+       size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
+                                   size) +
+               vmw_validation_align(sizeof(struct vmw_validation_bo_node));
+       if (!vmw_validation_mem_alloc(ctx, size))
+               return -ENOMEM;
+
+       ctx->mem_size_left += size;
+       return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
new file mode 100644 (file)
index 0000000..b57e329
--- /dev/null
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_VALIDATION_H_
+#define _VMWGFX_VALIDATION_H_
+
+#include <drm/drm_hashtab.h>
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+/**
+ * struct vmw_validation_context - Per command submission validation context
+ * @ht: Hash table used to find resource- or buffer object duplicates
+ * @resource_list: List head for resource validation metadata
+ * @resource_ctx_list: List head for resource validation metadata for
+ * resources that need to be validated before those in @resource_list
+ * @bo_list: List head for buffer objects
+ * @page_list: List of pages used by the memory allocator
+ * @ticket: Ticket used for ww mutex locking
+ * @res_mutex: Pointer to mutex used for resource reserving
+ * @merge_dups: Whether to merge metadata for duplicate resources or
+ * buffer objects
+ * @mem_size_left: Free memory left in the last page in @page_list
+ * @page_address: Kernel virtual address of the last page in @page_list
+ */
+struct vmw_validation_context {
+       struct drm_open_hash *ht;
+       struct list_head resource_list;
+       struct list_head resource_ctx_list;
+       struct list_head bo_list;
+       struct list_head page_list;
+       struct ww_acquire_ctx ticket;
+       struct mutex *res_mutex;
+       unsigned int merge_dups;
+       unsigned int mem_size_left;
+       u8 *page_address;
+};
+
+struct vmw_buffer_object;
+struct vmw_resource;
+struct vmw_fence_obj;
+
+#if 0
+/**
+ * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+ * @_name: The name of the variable
+ * @_ht: The hash table used to find dups or NULL if none
+ * @_merge_dups: Whether to merge duplicate buffer object- or resource
+ * entries. If set to true, ideally a hash table pointer should be supplied
+ * as well unless the number of resources and buffer objects per validation
+ * is known to be very small
+ */
+#endif
+#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)                   \
+       struct vmw_validation_context _name =                           \
+       { .ht = _ht,                                                    \
+         .resource_list = LIST_HEAD_INIT((_name).resource_list),       \
+         .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+         .bo_list = LIST_HEAD_INIT((_name).bo_list),                   \
+         .page_list = LIST_HEAD_INIT((_name).page_list),               \
+         .res_mutex = NULL,                                            \
+         .merge_dups = _merge_dups,                                    \
+         .mem_size_left = 0,                                           \
+       }
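
For illustration, two hedged ways of declaring a context (my_ht is a
hypothetical caller-owned hash table):

        /* On-stack context, no duplicate merging: */
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

        /* Merge duplicates, using a hash table for lookup: */
        DECLARE_VAL_CONTEXT(val_ctx2, &my_ht, 1);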
+
+/**
+ * vmw_validation_has_bos - return whether the validation context has
+ * any buffer objects registered.
+ *
+ * @ctx: The validation context
+ * Returns: Whether any buffer objects are registered
+ */
+static inline bool
+vmw_validation_has_bos(struct vmw_validation_context *ctx)
+{
+       return !list_empty(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_set_ht - Register a hash table for duplicate finding
+ * @ctx: The validation context
+ * @ht: Pointer to a hash table to use for duplicate finding
+ * This function is intended to be used if the hash table wasn't
+ * available at validation context declaration time
+ */
+static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+                                        struct drm_open_hash *ht)
+{
+       ctx->ht = ht;
+}
+
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Perform waits interruptible
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+static inline int
+vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
+                         bool intr)
+{
+       return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
+                                     NULL);
+}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+static inline void
+vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+       ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
+ * with a validation context
+ * @ctx: The validation context
+ * @fence: The fence object with which to fence the buffer objects
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve, and fences them with a fence object.
+ */
+static inline void
+vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+                       struct vmw_fence_obj *fence)
+{
+       ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
+                                   (void *) fence);
+}
+
+/**
+ * vmw_validation_context_init - Initialize a validation context
+ * @ctx: Pointer to the validation context to initialize
+ *
+ * This function initializes a validation context with @merge_dups set
+ * to false
+ */
+static inline void
+vmw_validation_context_init(struct vmw_validation_context *ctx)
+{
+       memset(ctx, 0, sizeof(*ctx));
+       INIT_LIST_HEAD(&ctx->resource_list);
+       INIT_LIST_HEAD(&ctx->resource_ctx_list);
+       INIT_LIST_HEAD(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_align - Align a validation memory allocation
+ * @val: The size to be aligned
+ *
+ * Returns: @val aligned to the granularity used by the validation memory
+ * allocator.
+ */
+static inline unsigned int vmw_validation_align(unsigned int val)
+{
+       return ALIGN(val, sizeof(long));
+}
+
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+                         struct vmw_buffer_object *vbo,
+                         bool as_mob, bool cpu_blit);
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+                                     bool interruptible,
+                                     bool validate_as_mob);
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+                               struct vmw_resource *res,
+                               size_t priv_size,
+                               void **p_node,
+                               bool *first_usage);
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+                              bool intr);
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+                                 bool backoff);
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+                                     void *val_private,
+                                     struct vmw_buffer_object *vbo,
+                                     unsigned long backup_offset);
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
+
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+                          struct mutex *mutex, bool intr);
+void vmw_validation_revert(struct vmw_validation_context *ctx);
+void vmw_validation_done(struct vmw_validation_context *ctx,
+                        struct vmw_fence_obj *fence);
+
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+                              unsigned int size);
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+                              unsigned int size);
+#endif
index da962aa2cef5297ba7a5f78c4f499c12ac5a464a..fc6b7f8b62fb888238b83178212a16f9478564c8 100644 (file)
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
                        th->thdev[i] = NULL;
                }
 
-               th->num_thdevs = lowest;
+               if (lowest >= 0)
+                       th->num_thdevs = lowest;
        }
 
        if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
                                .flags  = IORESOURCE_MEM,
                        },
                        {
-                               .start  = TH_MMIO_SW,
+                               .start  = 1, /* use resource[1] */
                                .end    = 0,
                                .flags  = IORESOURCE_MEM,
                        },
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
        struct intel_th_device *thdev;
        struct resource res[3];
        unsigned int req = 0;
+       bool is64bit = false;
        int r, err;
 
        thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
 
        thdev->drvdata = th->drvdata;
 
+       for (r = 0; r < th->num_resources; r++)
+               if (th->resource[r].flags & IORESOURCE_MEM_64) {
+                       is64bit = true;
+                       break;
+               }
+
        memcpy(res, subdev->res,
               sizeof(struct resource) * subdev->nres);
 
        for (r = 0; r < subdev->nres; r++) {
                struct resource *devres = th->resource;
-               int bar = TH_MMIO_CONFIG;
+               int bar = 0; /* cut subdevices' MMIO from resource[0] */
 
                /*
                 * Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
                 */
                if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
                        bar = res[r].start;
+                       if (is64bit)
+                               bar *= 2;
                        res[r].start = 0;
                        res[r].end = resource_size(&devres[bar]) - 1;
                }
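
In effect, a subdevice resource with .end == 0 encodes a BAR index in its
.start field; because a 64-bit BAR occupies two consecutive slots in
th->resource[], the index is doubled on such devices. A hedged illustration
of the arithmetic:

        int bar = 1;            /* hypothetical: res[r].start == 1 */
        if (is64bit)
                bar *= 2;       /* 64-bit BAR1 lives at th->resource[2] */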
index c2e55e5d97f654cf3a392fdf1449ca9ddd47250d..1cf6290d643555d1cd9edfd114f2756b66d1b4bb 100644 (file)
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Ice Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 0bee1f4b914e751d9893875665d0b588d7e3258d..3208ad6ad54014776cd333f4ac068c56cc737484 100644 (file)
@@ -337,6 +337,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
        return 0;
 }
 
+/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev:    IB device whose GID entry is to be deleted
+ * @port:      Port number of the IB device
+ * @table:     GID table of the IB device for a port
+ * @ix:                GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+                   struct ib_gid_table *table, int ix)
+{
+       struct ib_gid_table_entry *entry;
+
+       lockdep_assert_held(&table->lock);
+
+       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+                ib_dev->name, port, ix,
+                table->data_vec[ix]->attr.gid.raw);
+
+       write_lock_irq(&table->rwlock);
+       entry = table->data_vec[ix];
+       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+       /*
+        * For non-RoCE protocols, the GID entry slot is ready for reuse.
+        */
+       if (!rdma_protocol_roce(ib_dev, port))
+               table->data_vec[ix] = NULL;
+       write_unlock_irq(&table->rwlock);
+
+       put_gid_entry_locked(entry);
+}
+
 /**
  * add_modify_gid - Add or modify GID table entry
  *
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
-               put_gid_entry(table->data_vec[attr->index]);
+               del_gid(attr->device, attr->port_num, table, attr->index);
 
        /*
         * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ static int add_modify_gid(struct ib_gid_table *table,
        return ret;
 }
 
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev:    IB device whose GID entry to be deleted
- * @port:      Port number of the IB device
- * @table:     GID table of the IB device for a port
- * @ix:                GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
-                   struct ib_gid_table *table, int ix)
-{
-       struct ib_gid_table_entry *entry;
-
-       lockdep_assert_held(&table->lock);
-
-       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
-                ib_dev->name, port, ix,
-                table->data_vec[ix]->attr.gid.raw);
-
-       write_lock_irq(&table->rwlock);
-       entry = table->data_vec[ix];
-       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
-       /*
-        * For non RoCE protocol, GID entry slot is ready to use.
-        */
-       if (!rdma_protocol_roce(ib_dev, port))
-               table->data_vec[ix] = NULL;
-       write_unlock_irq(&table->rwlock);
-
-       put_gid_entry_locked(entry);
-}
-
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
index 5f437d1570fb02d516b1ee60f0a4cee42053da50..21863ddde63e3040b285d9decd8a2ee1c47534b8 100644 (file)
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
index a21d5214afc367b260fd445642c55e6040d02dfb..e012ca80f9d196ddbb8723691ec4087a55c0d863 100644 (file)
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
 
        if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
            cmd->base.cur_qp_state > IB_QPS_ERR) ||
-           cmd->base.qp_state > IB_QPS_ERR) {
+           (cmd->base.attr_mask & IB_QP_STATE &&
+           cmd->base.qp_state > IB_QPS_ERR)) {
                ret = -EINVAL;
                goto release_qp;
        }
 
-       attr->qp_state            = cmd->base.qp_state;
-       attr->cur_qp_state        = cmd->base.cur_qp_state;
-       attr->path_mtu            = cmd->base.path_mtu;
-       attr->path_mig_state      = cmd->base.path_mig_state;
-       attr->qkey                = cmd->base.qkey;
-       attr->rq_psn              = cmd->base.rq_psn;
-       attr->sq_psn              = cmd->base.sq_psn;
-       attr->dest_qp_num         = cmd->base.dest_qp_num;
-       attr->qp_access_flags     = cmd->base.qp_access_flags;
-       attr->pkey_index          = cmd->base.pkey_index;
-       attr->alt_pkey_index      = cmd->base.alt_pkey_index;
-       attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-       attr->max_rd_atomic       = cmd->base.max_rd_atomic;
-       attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
-       attr->min_rnr_timer       = cmd->base.min_rnr_timer;
-       attr->port_num            = cmd->base.port_num;
-       attr->timeout             = cmd->base.timeout;
-       attr->retry_cnt           = cmd->base.retry_cnt;
-       attr->rnr_retry           = cmd->base.rnr_retry;
-       attr->alt_port_num        = cmd->base.alt_port_num;
-       attr->alt_timeout         = cmd->base.alt_timeout;
-       attr->rate_limit          = cmd->rate_limit;
+       if (cmd->base.attr_mask & IB_QP_STATE)
+               attr->qp_state = cmd->base.qp_state;
+       if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+               attr->cur_qp_state = cmd->base.cur_qp_state;
+       if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+               attr->path_mtu = cmd->base.path_mtu;
+       if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+               attr->path_mig_state = cmd->base.path_mig_state;
+       if (cmd->base.attr_mask & IB_QP_QKEY)
+               attr->qkey = cmd->base.qkey;
+       if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+               attr->rq_psn = cmd->base.rq_psn;
+       if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+               attr->sq_psn = cmd->base.sq_psn;
+       if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+               attr->dest_qp_num = cmd->base.dest_qp_num;
+       if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+               attr->qp_access_flags = cmd->base.qp_access_flags;
+       if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+               attr->pkey_index = cmd->base.pkey_index;
+       if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+               attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+       if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               attr->max_rd_atomic = cmd->base.max_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+               attr->min_rnr_timer = cmd->base.min_rnr_timer;
+       if (cmd->base.attr_mask & IB_QP_PORT)
+               attr->port_num = cmd->base.port_num;
+       if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+               attr->timeout = cmd->base.timeout;
+       if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+               attr->retry_cnt = cmd->base.retry_cnt;
+       if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+               attr->rnr_retry = cmd->base.rnr_retry;
+       if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+               attr->alt_port_num = cmd->base.alt_port_num;
+               attr->alt_timeout = cmd->base.alt_timeout;
+               attr->alt_pkey_index = cmd->base.alt_pkey_index;
+       }
+       if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+               attr->rate_limit = cmd->rate_limit;
 
        if (cmd->base.attr_mask & IB_QP_AV)
                copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
index 6d974e2363df249c4291a3331d9d16f0a14232a6..50152c1b100452f7a4c8a9f733739ac25cbe777d 100644 (file)
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
+       file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);
 
        uverbs_close_fd(filp);
index 73ea6f0db88fb5c2b2bbd698be684baf34dc5331..be854628a7c63149c05ed74f9f001a39fb1bc59d 100644 (file)
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
                kfree(rcu_dereference_protected(*slot, true));
                radix_tree_iter_delete(&uapi->radix, &iter, slot);
        }
+       kfree(uapi);
 }
 
 struct uverbs_api *uverbs_alloc_api(
index 20b9f31052bf974fe43d335730daa4268ad614de..85cd1a3593d610132ded3796b5b90384bdb0342c 100644 (file)
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 /* Mutex to protect the list of bnxt_re devices added */
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
 
 /* SR-IOV helper functions */
 
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
        if (!rdev)
                return;
 
-       bnxt_re_ib_unreg(rdev, false);
+       bnxt_re_ib_unreg(rdev);
 }
 
 static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 /* Driver registration routines used to let the networking driver (bnxt_en)
  * to know that the RoCE driver is now installed
  */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
                return -EINVAL;
 
        en_dev = rdev->en_dev;
-       /* Acquire rtnl lock if it is not invokded from netdev event */
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                    BNXT_ROCE_ULP);
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 
        en_dev = rdev->en_dev;
 
-       rtnl_lock();
        rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                                  &bnxt_re_ulp_ops, rdev);
-       rtnl_unlock();
        return rc;
 }
 
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
 
        en_dev = rdev->en_dev;
 
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
 
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
 
        num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
 
-       rtnl_lock();
        num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                         rdev->msix_entries,
                                                         num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
        }
        rdev->num_msix = num_msix_got;
 done:
-       rtnl_unlock();
        return rc;
 }
 
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
        fw_msg->timeout = timeout;
 }
 
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
-                                bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_free_input req = {0};
        struct hwrm_ring_free_output resp;
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW ring:%d :%#x", req.ring_id, rc);
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
        req.enables = 0;
        req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
        if (!rc)
                *fw_ring_id = le16_to_cpu(resp.ring_id);
 
-       rtnl_unlock();
        return rc;
 }
 
 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
-                                     u32 fw_stats_ctx_id, bool lock_wait)
+                                     u32 fw_stats_ctx_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {0};
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW stats context %#x", rc);
 
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        if (!rc)
                *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
 
-       rtnl_unlock();
        return rc;
 }
 
@@ -929,19 +897,19 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
        return rc;
 }
 
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
 {
        int i;
 
        for (i = 0; i < rdev->num_msix - 1; i++) {
-               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
 }
 
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 {
-       bnxt_re_free_nq_res(rdev, lock_wait);
+       bnxt_re_free_nq_res(rdev);
 
        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
        return 0;
 }
 
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
 {
        int i, rc;
 
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
                cancel_delayed_work(&rdev->worker);
 
        bnxt_re_cleanup_res(rdev);
-       bnxt_re_free_res(rdev, lock_wait);
+       bnxt_re_free_res(rdev);
 
        if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
                rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to deinitialize RCFW: %#x", rc);
-               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
-                                          lock_wait);
+               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
                bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
                bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
-               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
-               rc = bnxt_re_free_msix(rdev, lock_wait);
+               rc = bnxt_re_free_msix(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to free MSI-X vectors: %#x", rc);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
-               rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+               rc = bnxt_re_unregister_netdev(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
        int i, j, rc;
 
+       bool locked;
+
+       /* Acquire the rtnl lock throughout this function */
+       rtnl_lock();
+       locked = true;
+
        /* Registered a new RoCE device instance to netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
        }
 
+       rtnl_unlock();
+       locked = false;
+
        /* Register ib dev */
        rc = bnxt_re_register_ib(rdev);
        if (rc) {
                pr_err("Failed to register with IB: %#x\n", rc);
                goto fail;
        }
+       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        dev_info(rdev_to_dev(rdev), "Device registered successfully");
        for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
                rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                        goto fail;
                }
        }
-       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 
        return 0;
 free_sctx:
-       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
        bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
 disable_rcfw:
        bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
-       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
 free_rcfw:
        bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 fail:
-       bnxt_re_ib_unreg(rdev, true);
+       if (!locked)
+               rtnl_lock();
+       bnxt_re_ib_unreg(rdev);
+       rtnl_unlock();
+
        return rc;
 }
 
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                 */
                if (atomic_read(&rdev->sched_count) > 0)
                        goto exit;
-               bnxt_re_ib_unreg(rdev, false);
+               bnxt_re_ib_unreg(rdev);
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
                break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
                 */
                flush_workqueue(bnxt_re_wq);
                bnxt_re_dev_stop(rdev);
-               bnxt_re_ib_unreg(rdev, true);
+               /* Acquire the rtnl_lock as the L2 resources are freed here */
+               rtnl_lock();
+               bnxt_re_ib_unreg(rdev);
+               rtnl_unlock();
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
        }
index 2c19bf772451bfef693eaad1fa5a678e5cc9e067..e1668bcc2d13d71aba2f35021ec25cd693e79682 100644 (file)
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        struct hfi1_devdata *dd = ppd->dd;
        struct send_context *sc;
        int i;
+       int sc_flags;
 
        if (flags & FREEZE_SELF)
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        /* notify all SDMA engines that they are going into a freeze */
        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
 
+       sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+                                             SCF_LINK_DOWN : 0);
        /* do halt pre-handling on all enabled send contexts */
        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (sc && (sc->flags & SCF_ENABLED))
-                       sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+                       sc_stop(sc, sc_flags);
        }
 
        /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 
                handle_linkup_change(dd, 1);
+               pio_kernel_linkup(dd);
 
                /*
                 * After link up, a new link width will have been set.
index c2c1cba5b23be440bc292a205ca523d6962c4233..752057647f091734368f998c0173234d8a1f2dd0 100644 (file)
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
        unsigned long flags;
        int write = 1;  /* write sendctrl back */
        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
+       int i;
 
        spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        /* Fall through */
        case PSC_DATA_VL_ENABLE:
+               mask = 0;
+               for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+                       if (!dd->vld[i].mtu)
+                               mask |= BIT_ULL(i);
                /* Disallow sending on VLs not enabled */
-               mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-                               SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+               mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+                       SEND_CTRL_UNSUPPORTED_VL_SHIFT;
                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
                break;
        case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
 void sc_disable(struct send_context *sc)
 {
        u64 reg;
-       unsigned long flags;
        struct pio_buf *pbuf;
 
        if (!sc)
                return;
 
        /* do all steps, even if already disabled */
-       spin_lock_irqsave(&sc->alloc_lock, flags);
+       spin_lock_irq(&sc->alloc_lock);
        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
        sc->flags &= ~SCF_ENABLED;
        sc_wait_for_packet_egress(sc, 1);
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
-       spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
        /*
         * Flush any waiters.  Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
         * proceed with the flush.
         */
        udelay(1);
-       spin_lock_irqsave(&sc->release_lock, flags);
+       spin_lock(&sc->release_lock);
        if (sc->sr) {   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
                                sc->sr_tail = 0;
                }
        }
-       spin_unlock_irqrestore(&sc->release_lock, flags);
+       spin_unlock(&sc->release_lock);
+       spin_unlock_irq(&sc->alloc_lock);
 }
 
 /* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;
+               if (sc->flags & SCF_LINK_DOWN)
+                       continue;
 
                sc_enable(sc);  /* will clear the sc frozen flag */
        }
 }
 
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken.  However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whoever is sending data will start sending again, which will hang any QP
+ * that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurred and takes this
+ * path for a link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+       struct send_context *sc;
+       int i;
+
+       for (i = 0; i < dd->num_send_contexts; i++) {
+               sc = dd->send_contexts[i].sc;
+               if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+                       continue;
+
+               sc_enable(sc);  /* will clear the sc link down flag */
+       }
+}
+
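
For illustration, the gating that the unfreeze loop above now applies can be modelled in a few lines. This is only a sketch: may_reenable_on_unfreeze() is a made-up name, while the SCF_* values match the pio.h hunk further down.

#include <stdio.h>

#define SCF_FROZEN    0x08
#define SCF_LINK_DOWN 0x10	/* new flag, see the pio.h hunk below */

/* Toy predicate: pio_kernel_unfreeze() only re-enables frozen contexts
 * that are NOT marked link-down; those wait for pio_kernel_linkup().
 */
static int may_reenable_on_unfreeze(unsigned int flags)
{
	return (flags & SCF_FROZEN) && !(flags & SCF_LINK_DOWN);
}

int main(void)
{
	printf("%d\n", may_reenable_on_unfreeze(SCF_FROZEN));			/* 1 */
	printf("%d\n", may_reenable_on_unfreeze(SCF_FROZEN | SCF_LINK_DOWN));	/* 0 */
	return 0;
}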
 /*
  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
  * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
 {
        unsigned long flags;
 
-       /* mark the context */
-       sc->flags |= flag;
-
        /* stop buffer allocations */
        spin_lock_irqsave(&sc->alloc_lock, flags);
+       /* mark the context */
+       sc->flags |= flag;
        sc->flags &= ~SCF_ENABLED;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        wake_up(&sc->halt_wait);
index 058b08f459ab7947e53aa5ad676febc9999f7ade..aaf372c3e5d6a3cc0de82aaf9819c02b97bd195f 100644 (file)
@@ -139,6 +139,7 @@ struct send_context {
 #define SCF_IN_FREE 0x02
 #define SCF_HALTED  0x04
 #define SCF_FROZEN  0x08
+#define SCF_LINK_DOWN 0x10
 
 struct send_context_info {
        struct send_context *sc;        /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
 void pio_reset_all(struct hfi1_devdata *dd);
 void pio_freeze(struct hfi1_devdata *dd);
 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
 
 /* global PIO send control operations */
 #define PSC_GLOBAL_ENABLE 0
index a3a7b33196d64158cd069cdf5d32c429f79f7b5d..5c88706121c1cb0faf2da79dae014113f7d0a0d9 100644 (file)
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
                        if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
                                if (++req->iov_idx == req->data_iovs) {
                                        ret = -EFAULT;
-                                       goto free_txreq;
+                                       goto free_tx;
                                }
                                iovec = &req->iovs[req->iov_idx];
                                WARN_ON(iovec->offset);
index 13374c727b142d61dc3f6bdb0603f958c6a1a0ca..a7c586a5589d642524f7e659c6a4dfa512eed4e2 100644 (file)
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;
+       u8 sl;
 
        if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
            !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        /* test the mapping for validity */
        ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
        ppd = ppd_from_ibp(ibp);
-       sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
        dd = dd_from_ppd(ppd);
+
+       sl = rdma_ah_get_sl(ah_attr);
+       if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+               return -EINVAL;
+
+       sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
index ac116d63e4661adf03662bdfb2cd598cc179ce3c..f2f11e652dcd2a751d10397c8c65d6be8a53b53e 100644 (file)
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        struct devx_obj *obj;
        int err;
 
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto obj_free;
+               goto obj_destroy;
 
        return 0;
 
+obj_destroy:
+       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
 obj_free:
        kfree(obj);
        return err;
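
The devx hunk above fixes an unwind-ordering bug: once the firmware object has been created, a later failure must destroy it, not merely free the host-side struct. The goto-ladder pattern in miniature (a sketch only; nothing here is mlx5-specific and all names are invented):

#include <stdio.h>
#include <stdlib.h>

static int create_hw_obj(void)   { puts("hw object created");   return 0; }
static void destroy_hw_obj(void) { puts("hw object destroyed"); }

static int create(int fail_copy)
{
	int err;
	char *obj = malloc(32);

	if (!obj)
		return -1;
	err = create_hw_obj();
	if (err)
		goto obj_free;
	if (fail_copy) {		/* e.g. the copy-to-userspace step failing */
		err = -1;
		goto obj_destroy;	/* was: goto obj_free -- leaked the HW object */
	}
	return 0;			/* on success the object stays alive */

obj_destroy:
	destroy_hw_obj();
obj_free:
	free(obj);
	return err;
}

int main(void)
{
	create(1);	/* exercise the failure path */
	return 0;
}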
index 444d16520506a1773f01dc0b89087091d867e939..0b34e909505f5fa4c6a1404227a137ae94e54aa9 100644 (file)
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i;
+       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               for (i = 0; i < target->req_ring_size; ++i) {
-                       struct srp_request *req = &ch->req_ring[i];
+               for (j = 0; j < target->req_ring_size; ++j) {
+                       struct srp_request *req = &ch->req_ring[j];
 
                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
                }
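
The srp_reset_device() hunk fixes a classic reuse of the outer loop counter: the inner ring walk clobbered i, so only the first channel's requests were ever finished. A minimal reproduction of the bug and the fix (sizes are arbitrary):

#include <stdio.h>

int main(void)
{
	int ch_count = 4, ring_size = 8;
	int visited = 0;
	int i, j;

	/* Buggy form: reusing i for the inner loop ends the outer loop
	 * after one channel, because i leaves the inner loop >= ch_count.
	 */
	for (i = 0; i < ch_count; i++)
		for (i = 0; i < ring_size; ++i)
			visited++;
	printf("buggy: visited %d of %d\n", visited, ch_count * ring_size);

	/* Fixed form, as in the patch: distinct counters. */
	visited = 0;
	for (i = 0; i < ch_count; i++)
		for (j = 0; j < ring_size; ++j)
			visited++;
	printf("fixed: visited %d of %d\n", visited, ch_count * ring_size);
	return 0;
}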
index 6f62da2909ec0f07eb7cd7d1a76122bc11ed13a7..6caee807cafabf6955053ee817c30f70571d3ef5 100644 (file)
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {  /* American layout */
-       [0]      = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {  /* American layout */
        [1]      = KEY_ESC,
        [2]      = KEY_1,
        [3]      = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = {       /* American layout */
        [38]     = KEY_L,
        [39]     = KEY_SEMICOLON,
        [40]     = KEY_APOSTROPHE,
-       [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
+       [41]     = KEY_GRAVE,
        [42]     = KEY_LEFTSHIFT,
-       [43]     = KEY_GRAVE,           /* FIXME: '~' */
+       [43]     = KEY_BACKSLASH,
        [44]     = KEY_Z,
        [45]     = KEY_X,
        [46]     = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = {     /* American layout */
        [66]     = KEY_F8,
        [67]     = KEY_F9,
        [68]     = KEY_F10,
-       [69]     = KEY_ESC,
-       [70]     = KEY_DELETE,
-       [71]     = KEY_KP7,
-       [72]     = KEY_KP8,
-       [73]     = KEY_KP9,
+       [71]     = KEY_HOME,
+       [72]     = KEY_UP,
        [74]     = KEY_KPMINUS,
-       [75]     = KEY_KP4,
-       [76]     = KEY_KP5,
-       [77]     = KEY_KP6,
+       [75]     = KEY_LEFT,
+       [77]     = KEY_RIGHT,
        [78]     = KEY_KPPLUS,
-       [79]     = KEY_KP1,
-       [80]     = KEY_KP2,
-       [81]     = KEY_KP3,
-       [82]     = KEY_KP0,
-       [83]     = KEY_KPDOT,
-       [90]     = KEY_KPLEFTPAREN,
-       [91]     = KEY_KPRIGHTPAREN,
-       [92]     = KEY_KPASTERISK,      /* FIXME */
-       [93]     = KEY_KPASTERISK,
-       [94]     = KEY_KPPLUS,
-       [95]     = KEY_HELP,
+       [80]     = KEY_DOWN,
+       [82]     = KEY_INSERT,
+       [83]     = KEY_DELETE,
        [96]     = KEY_102ND,
-       [97]     = KEY_KPASTERISK,      /* FIXME */
-       [98]     = KEY_KPSLASH,
+       [97]     = KEY_UNDO,
+       [98]     = KEY_HELP,
        [99]     = KEY_KPLEFTPAREN,
        [100]    = KEY_KPRIGHTPAREN,
        [101]    = KEY_KPSLASH,
        [102]    = KEY_KPASTERISK,
-       [103]    = KEY_UP,
-       [104]    = KEY_KPASTERISK,      /* FIXME */
-       [105]    = KEY_LEFT,
-       [106]    = KEY_RIGHT,
-       [107]    = KEY_KPASTERISK,      /* FIXME */
-       [108]    = KEY_DOWN,
-       [109]    = KEY_KPASTERISK,      /* FIXME */
-       [110]    = KEY_KPASTERISK,      /* FIXME */
-       [111]    = KEY_KPASTERISK,      /* FIXME */
-       [112]    = KEY_KPASTERISK,      /* FIXME */
-       [113]    = KEY_KPASTERISK       /* FIXME */
+       [103]    = KEY_KP7,
+       [104]    = KEY_KP8,
+       [105]    = KEY_KP9,
+       [106]    = KEY_KP4,
+       [107]    = KEY_KP5,
+       [108]    = KEY_KP6,
+       [109]    = KEY_KP1,
+       [110]    = KEY_KP2,
+       [111]    = KEY_KP3,
+       [112]    = KEY_KP0,
+       [113]    = KEY_KPDOT,
+       [114]    = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-       if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
+       if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
 
                // report raw events here?
 
                scancode = atakbd_keycode[scancode];
 
-               if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
-                       input_report_key(atakbd_dev, scancode, 1);
-                       input_report_key(atakbd_dev, scancode, 0);
-                       input_sync(atakbd_dev);
-               } else {
-                       input_report_key(atakbd_dev, scancode, down);
-                       input_sync(atakbd_dev);
-               }
-       } else                          /* scancodes >= 0xf2 are mouse data, most likely */
+               input_report_key(atakbd_dev, scancode, down);
+               input_sync(atakbd_dev);
+       } else                          /* scancodes >= 0xf3 are mouse data, most likely */
                printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
        return;
index 96a887f336982f7efc0d971e145ded52829b2dc8..eb14ddf693467b4619a9501aa5e712a9b45dfcdf 100644 (file)
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
        min = abs->minimum;
        max = abs->maximum;
 
-       if ((min != 0 || max != 0) && max <= min) {
+       if ((min != 0 || max != 0) && max < min) {
                printk(KERN_DEBUG
                       "%s: invalid abs[%02x] min:%d max:%d\n",
                       UINPUT_NAME, code, min, max);
index 44f57cf6675bbf10ed6fd5bc8b9900936e76a086..2d95e8d93cc761aefb102217473d940faf1e4d02 100644 (file)
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
        "LEN2131", /* ThinkPad P52 w/ NFC */
        "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
        NULL
 };
 
index 80e69bb8283e4b417c677637889654e0b1cadf30..83ac8c128192846f21a9aeb40d80b39869a1c245 100644 (file)
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        int ret;
 
+       if (device_may_wakeup(dev))
+               return enable_irq_wake(client->irq);
+
        ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
        return ret > 0 ? 0 : ret;
 }
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       if (device_may_wakeup(dev))
+               return disable_irq_wake(client->irq);
+
        return egalax_wake_up_device(client);
 }
 
index 4e04fff23977348877cce7a00d4ae394fadf91ef..73e47d93e7a05a5f7b9ebc79096081c573656746 100644 (file)
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
 
        /* The callers make sure that get_device_id() does not fail here */
        devid = get_device_id(dev);
+
+       /* For ACPI HID devices, we simply return the devid as such */
+       if (!dev_is_pci(dev))
+               return devid;
+
        ivrs_alias = amd_iommu_alias_table[devid];
+
        pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 
        if (ivrs_alias == pci_alias)
index 5f3f10cf9d9d0fecb1fc5747c60cbe9d4f9034b1..bedc801b06a0bf2c6745511acbab08e769a54eb2 100644 (file)
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (dev && dev_is_pci(dev) && info->pasid_supported) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
-                       __dmar_remove_one_dev_info(info);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       return NULL;
+                       pr_warn("No pasid table for %s, pasid disabled\n",
+                               dev_name(dev));
+                       info->pasid_supported = 0;
                }
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
index 1c05ed6fc5a596383c7ed94d7bac69de38d4554d..1fb5e12b029ac717379ec74544c83254ea0c0259 100644 (file)
@@ -11,7 +11,7 @@
 #define __INTEL_PASID_H
 
 #define PASID_MIN                      0x1
-#define PASID_MAX                      0x100000
+#define PASID_MAX                      0x20000
 
 struct pasid_entry {
        u64 val;
index 258115b10fa9e448129d84a0663ab99d51c64ee5..ad3e2b97469ed6de44f3524cafd677825d7afca5 100644 (file)
@@ -1241,6 +1241,12 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
+       struct rk_iommu *iommu = platform_get_drvdata(pdev);
+       int i = 0, irq;
+
+       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+               devm_free_irq(iommu->dev, irq, iommu);
+
        pm_runtime_force_suspend(&pdev->dev);
 }
 
index 83504dd8100ab2a80d7f0e737e50266de41add32..954dad29e6e8fca910b0ebd24171591f2acd0831 100644 (file)
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
index 6116bbf870d8ef9004717bf5d04803895d085bab..522c7426f3a05cee10df0e984521472849b9b707 100644 (file)
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
-               schedule_work(&ja->discard_work);
+               queue_work(bch_journal_wq, &ja->discard_work);
        }
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
                : &j->w[0];
 
        __closure_wake_up(&w->wait);
-       continue_at_nobarrier(cl, journal_write, system_wq);
+       continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
-               continue_at(cl, journal_write, system_wq);
+               continue_at(cl, journal_write, bch_journal_wq);
                return;
        }
 
index 94c756c66bd7216a6d83b67eaef891269f965467..30ba9aeb5ee8345ac192e34e51e67beae2127950 100644 (file)
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
+       if (bch_journal_wq)
+               destroy_workqueue(bch_journal_wq);
+
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
        if (!bcache_wq)
                goto err;
 
+       bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+       if (!bch_journal_wq)
+               goto err;
+
        bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
        if (!bcache_kobj)
                goto err;
index b5410aeb5fe26ea9975716dc4161ba2b06d03dac..bb41bea950ac3f6ccfb325fb3cbdebf51d58e408 100644 (file)
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
                                              V4L2_CID_AUTO_WHITE_BALANCE,
                                              0, 1, 1,
                                              V4L2_WHITE_BALANCE_AUTO);
-       if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
-               ret = PTR_ERR(mt9v111->auto_awb);
-               goto error_free_ctrls;
-       }
-
        mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
                                                   &mt9v111_ctrl_ops,
                                                   V4L2_CID_EXPOSURE_AUTO,
                                                   V4L2_EXPOSURE_MANUAL,
                                                   0, V4L2_EXPOSURE_AUTO);
-       if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
-               ret = PTR_ERR(mt9v111->auto_exp);
-               goto error_free_ctrls;
-       }
-
-       /* Initialize timings */
        mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
                                            V4L2_CID_HBLANK,
                                            MT9V111_CORE_R05_MIN_HBLANK,
                                            MT9V111_CORE_R05_MAX_HBLANK, 1,
                                            MT9V111_CORE_R05_DEF_HBLANK);
-       if (IS_ERR_OR_NULL(mt9v111->hblank)) {
-               ret = PTR_ERR(mt9v111->hblank);
-               goto error_free_ctrls;
-       }
-
        mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
                                            V4L2_CID_VBLANK,
                                            MT9V111_CORE_R06_MIN_VBLANK,
                                            MT9V111_CORE_R06_MAX_VBLANK, 1,
                                            MT9V111_CORE_R06_DEF_VBLANK);
-       if (IS_ERR_OR_NULL(mt9v111->vblank)) {
-               ret = PTR_ERR(mt9v111->vblank);
-               goto error_free_ctrls;
-       }
 
        /* PIXEL_RATE is fixed: just expose it to user space. */
        v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
                          DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
                          DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
 
+       if (mt9v111->ctrls.error) {
+               ret = mt9v111->ctrls.error;
+               goto error_free_ctrls;
+       }
        mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
 
        /* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
        mt9v111->pad.flags      = MEDIA_PAD_FL_SOURCE;
        ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 #endif
 
        ret = mt9v111_chip_probe(mt9v111);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 
        ret = v4l2_async_register_subdev(&mt9v111->sd);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 
        return 0;
 
-error_free_ctrls:
-       v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
+error_free_entity:
 #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&mt9v111->sd.entity);
 #endif
 
+error_free_ctrls:
+       v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
        mutex_destroy(&mt9v111->pwr_mutex);
        mutex_destroy(&mt9v111->stream_mutex);
 
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(sd);
 
-       v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
 #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
 #endif
 
+       v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
        mutex_destroy(&mt9v111->pwr_mutex);
        mutex_destroy(&mt9v111->stream_mutex);
 
index 94c1fe0e978794124568ecdecbcc211aef829929..54fe90acb5b2962432140949a17e1630361e08ca 100644 (file)
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
        depends on MFD_CROS_EC
        select CEC_CORE
        select CEC_NOTIFIER
+       select CHROME_PLATFORMS
+       select CROS_EC_PROTO
        ---help---
          If you say yes here you will get support for the
          ChromeOS Embedded Controller's CEC.
index 729b31891466db3431c0ac295b0aa4df64e16c1b..a5ae85674ffbb01c7bd93322a26821406e87f6fe 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
index c832539397d7a852a93093a55fb1e3385f85bfdc..12bce391d71fd0393d63413f52b114bb516fa1dd 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CAMSS_CSI_PHY_LNn_CFG2(n)              (0x004 + 0x40 * (n))
 #define CAMSS_CSI_PHY_LNn_CFG3(n)              (0x008 + 0x40 * (n))
index bcd0dfd3361865a18873ccebd5fd7879c6937b02..2e65caf1ecae7046bdfea5bd9f272ddbbc0556d5 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CSIPHY_3PH_LNn_CFG1(n)                 (0x000 + 0x100 * (n))
 #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG    (BIT(7) | BIT(6))
index 4559f3b1b38cb823352dae35271057af87aab72a..008afb85023bec24eefefaf5042c32269eb257c0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
index 7f269021d08c0d8c13995328c9ba85e99de836e4..1f33b4eb198cc0c3085239afbd52aee428bf63a4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
        else
                return -EINVAL;
 
-       ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
-                             GFP_KERNEL);
+       ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+                                  GFP_KERNEL);
        if (!ispif->line)
                return -ENOMEM;
 
index da3a9fed9f2ddcb1a7ae39a541f6c9aad2be2ae8..174a36be6f5d86630947a8203e524414b4043ccd 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"
index 4c584bffd179e1a25d8ddf95ea0403f8cb132d45..0dca8bf9281e7749c32dbc113dcd0a7d277c22e1 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"
index dcc0c30ef1b109a91bf6185b434feba5fed94794..669615fff6a0819dfa792a9be7253f92b39f79c5 100644 (file)
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
-                               GFP_KERNEL);
+       camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+                                    sizeof(*camss->csiphy), GFP_KERNEL);
        if (!camss->csiphy)
                return -ENOMEM;
 
-       camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
-                             GFP_KERNEL);
+       camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+                                  GFP_KERNEL);
        if (!camss->csid)
                return -ENOMEM;
 
-       camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+       camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+                                 GFP_KERNEL);
        if (!camss->vfe)
                return -ENOMEM;
 
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
 
 MODULE_DEVICE_TABLE(of, camss_dt_match);
 
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
 {
        return 0;
 }
 
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
 {
        return 0;
 }
index 666d319d3d1ab21db3b50f35a725536bb486cd38..1f6c1eefe38920c9f1e08d1f99158db7e715c838 100644 (file)
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                        if (msg[0].addr == state->af9033_i2c_addr[1])
                                reg |= 0x100000;
 
-                       ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
-                                       msg[0].len - 3);
+                       ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+                                                                &msg[0].buf[3],
+                                                                msg[0].len - 3)
+                                               : -EOPNOTSUPP;
                } else {
                        /* I2C write */
                        u8 buf[MAX_XFER_SIZE];
index a764a83f99dabe54585dbad7dba40b6601177c03..0d87e11e7f1d84537fe43d95249b1bd3a2ce291d 100644 (file)
@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
        struct slave *slave = NULL;
        struct list_head *iter;
        struct ad_info ad_info;
-       struct netpoll_info *ni;
-       const struct net_device_ops *ops;
 
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                if (bond_3ad_get_active_agg_info(bond, &ad_info))
                        return;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               ops = slave->dev->netdev_ops;
-               if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+               if (!bond_slave_is_up(slave))
                        continue;
 
                if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
                                continue;
                }
 
-               ni = rcu_dereference_bh(slave->dev->npinfo);
-               if (down_trylock(&ni->dev_lock))
-                       continue;
-               ops->ndo_poll_controller(slave->dev);
-               up(&ni->dev_lock);
+               netpoll_poll_dev(slave->dev);
        }
 }
 
index 024998d6d8c6ed959c555c82b66f687cb0a85c2c..6a8e2567f2bdef5e4a7fa3685de97354c67ed280 100644 (file)
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
 static void bmac_set_timeout(struct net_device *dev);
 static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
 static void bmac_start(struct net_device *dev);
 
 #define        DBDMA_SET(x)    ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
        spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static int
+static netdev_tx_t
 bmac_output(struct sk_buff *skb, struct net_device *dev)
 {
        struct bmac_data *bp = netdev_priv(dev);
index 0b5429d76bcf230f7c32fd52c690d1fc385535c5..68b9ee4894892f96f82453cd657d7aa4875b467b 100644 (file)
@@ -78,7 +78,7 @@ struct mace_data {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static void mace_reset(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
     mp->timeout_active = 1;
 }
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
     struct mace_data *mp = netdev_priv(dev);
     volatile struct dbdma_regs __iomem *td = mp->tx_dma;
index 137cbb470af2301b83864901332d794dcaa6f9b5..376f2c2613e7fbe27a1cbc18d15e77527fe3622a 100644 (file)
@@ -89,7 +89,7 @@ struct mace_frame {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
 static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
  * Transmit a frame
  */
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
        struct mace_data *mp = netdev_priv(dev);
        unsigned long flags;
index b5f1f62e8e253785436fa7cd9119a8467edf4fd4..d1e1a0ba86150208e73ffdbce006e8d1a822302c 100644 (file)
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                }
 
                /* for single fragment packets use build_skb() */
-               if (buff->is_eop) {
+               if (buff->is_eop &&
+                   buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
                        skb = build_skb(page_address(buff->page),
-                                       buff->len + AQ_SKB_ALIGN);
+                                       AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
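
The guard added above encodes the build_skb() sizing rule: build_skb() hands the whole buffer to the skb, so the payload must leave tailroom for the shared info block, otherwise the driver falls back to the fragment path. A sketch of the predicate (the two constants are assumed here, not read from the driver):

#include <stdio.h>

#define AQ_CFG_RX_FRAME_MAX	(2 * 1024)	/* assumed: 2K RX buffer */
#define AQ_SKB_ALIGN		256		/* assumed: tailroom reserve */

/* May this received fragment be wrapped with build_skb()? */
static int can_build_skb(unsigned int len, int is_eop)
{
	return is_eop && len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN;
}

int main(void)
{
	printf("%d\n", can_build_skb(1500, 1));	/* 1: fits with tailroom */
	printf("%d\n", can_build_skb(1900, 1));	/* 0: would overlap the reserve */
	return 0;
}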
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                        buff->len - ETH_HLEN,
                                        SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-                       for (i = 1U, next_ = buff->next,
-                            buff_ = &self->buff_ring[next_]; true;
-                            next_ = buff_->next,
-                            buff_ = &self->buff_ring[next_], ++i) {
-                               skb_add_rx_frag(skb, i, buff_->page, 0,
-                                               buff_->len,
-                                               SKB_TRUESIZE(buff->len -
-                                               ETH_HLEN));
-                               buff_->is_cleaned = 1;
-
-                               if (buff_->is_eop)
-                                       break;
+                       if (!buff->is_eop) {
+                               for (i = 1U, next_ = buff->next,
+                                    buff_ = &self->buff_ring[next_];
+                                    true; next_ = buff_->next,
+                                    buff_ = &self->buff_ring[next_], ++i) {
+                                       skb_add_rx_frag(skb, i,
+                                                       buff_->page, 0,
+                                                       buff_->len,
+                                                       SKB_TRUESIZE(buff->len -
+                                                       ETH_HLEN));
+                                       buff_->is_cleaned = 1;
+
+                                       if (buff_->is_eop)
+                                               break;
+                               }
                        }
                }
 
index 71362b7f60402545c3b4aa2bc391a1e2d3cd7f7f..fcc2328bb0d953e797c49699b11e5e5956a0d583 100644 (file)
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int i;
-
-       for_each_eth_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-       }
-}
-#endif
-
 static int bnx2x_validate_addr(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_tx_timeout         = bnx2x_tx_timeout,
        .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = poll_bnx2x,
-#endif
        .ndo_setup_tc           = __bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
index 177587f9c3f1e8560c16e2fc4e05b7edff4e0011..61957b0bbd8c9f46773ff26ac9d14759b96c3960 100644 (file)
@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
        bnxt_queue_sp_work(bp);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
-       struct bnxt *bp = netdev_priv(dev);
-       int i;
-
-       /* Only process tx rings/combined rings in netpoll mode. */
-       for (i = 0; i < bp->tx_nr_rings; i++) {
-               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
-               napi_schedule(&txr->bnapi->napi);
-       }
-}
-#endif
-
 static void bnxt_timer(struct timer_list *t)
 {
        struct bnxt *bp = from_timer(bp, t, timer);
@@ -8519,9 +8504,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
        .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
        .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
        .ndo_set_vf_trust       = bnxt_set_vf_trust,
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = bnxt_poll_controller,
 #endif
        .ndo_setup_tc           = bnxt_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
index f3b9fbcc705bc62304a5a09cbe54eae3d2e0b95d..790c684f08abcab21980cd9a8479f27cf516d464 100644 (file)
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                }
        }
 
+       if (i == ARRAY_SIZE(nvm_params))
+               return -EOPNOTSUPP;
+
        if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
                idx = bp->pf.port_id;
        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
index 092c817f8f11cdda48fe8233fb04b7de2a76586a..e1594c9df4c6cda15c61a3b77afa4e2564363b57 100644 (file)
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
        return 0;
 }
 
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
-                              struct bnxt_tc_actions *actions,
-                              const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+                             struct bnxt_tc_actions *actions,
+                             const struct tc_action *tc_act)
 {
-       if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+       switch (tcf_vlan_action(tc_act)) {
+       case TCA_VLAN_ACT_POP:
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-       } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+               break;
+       case TCA_VLAN_ACT_PUSH:
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
                actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
+       return 0;
 }
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 
                /* Push/pop VLAN */
                if (is_tcf_vlan(tc_act)) {
-                       bnxt_tc_parse_vlan(bp, actions, tc_act);
+                       rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+                       if (rc)
+                               return rc;
                        continue;
                }
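
The behavioural change in the VLAN hunk is that unknown actions are now rejected instead of silently accepted. A toy model of the new switch (the action constants below are simplified stand-ins, not the real UAPI values):

#include <stdio.h>
#include <errno.h>

enum { ACT_POP, ACT_PUSH, ACT_MODIFY };	/* assumed values for the sketch */

static int parse_vlan(int act)
{
	switch (act) {
	case ACT_POP:
	case ACT_PUSH:
		return 0;
	default:
		return -EOPNOTSUPP;	/* previously fell through as success */
	}
}

int main(void)
{
	printf("%d\n", parse_vlan(ACT_PUSH));	/* 0 */
	printf("%d\n", parse_vlan(ACT_MODIFY));	/* -EOPNOTSUPP (-95 on Linux) */
	return 0;
}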
 
index b8f75a22fb6c97183d89c97f7ce2a9f0cd5ead5d..f152da1ce0464c5c065010813213e5f60eb111af 100644 (file)
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
 };
 
 struct cpl_abort_req_rss6 {
-       WR_HDR;
        union opcode_tid ot;
        __be32 srqidx_status;
 };
index e2a702996db41f073879b91475c24c249292955a..13dfdfca49fc7816b8fb64c55d372b19370e4a7f 100644 (file)
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ep93xx_priv *ep = netdev_priv(dev);
        struct ep93xx_tdesc *txd;
index 3f8fe8fd79cc65beb17e334a0403b01006f0bd08..6324e80960c3e1126eb4ad1fe19aac30b74a0b6d 100644 (file)
@@ -113,7 +113,7 @@ struct net_local {
 
 /* Index to functions, as function prototypes. */
 static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t net_interrupt(int irq, void *dev_id);
 static void set_multicast_list(struct net_device *dev);
 static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
        return 0;
 }
 
-static int
+static netdev_tx_t
 net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        struct net_local *lp = netdev_priv(dev);
index dc983450354bbf3eae79dda14d7c44815cee126d..35f6291a3672331584539831a46025994749a69e 100644 (file)
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
 #define RX_AREA_END    0x0fc00
 
 static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+                                    struct net_device *dev);
 static irqreturn_t ether1_interrupt(int irq, void *dev_id);
 static int ether1_close(struct net_device *dev);
 static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-static int
+static netdev_tx_t
 ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
 {
        int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
index f00a1dc2128cbcfcf590cb22bf88aa3796b6ab45..2f7ae118217fe881a53614fb436ed119bf56919f 100644 (file)
@@ -347,7 +347,7 @@ static const char init_setup[] =
        0x7f /*  *multi IA */ };
 
 static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t i596_interrupt(int irq, void *dev_id);
 static int i596_close(struct net_device *dev);
 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
 }
 
 
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
index 8bb15a8c2a4026939f37161629d424963dc0a082..1a86184d44c0ae8cc2f96b0d942c70711630c4df 100644 (file)
@@ -121,7 +121,8 @@ static int     sun3_82586_probe1(struct net_device *dev,int ioaddr);
 static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
 static int     sun3_82586_open(struct net_device *dev);
 static int     sun3_82586_close(struct net_device *dev);
-static int     sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t     sun3_82586_send_packet(struct sk_buff *,
+                                             struct net_device *);
 static struct  net_device_stats *sun3_82586_get_stats(struct net_device *dev);
 static void    set_multicast_list(struct net_device *dev);
 static void    sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
  * send frame
  */
 
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        int len,i;
 #ifndef NO_NOPCOMMANDS
index 3726646863095dc145e8df1ccc5d082f9fd802bd..129f4e9f38dac01f424ea7337002add11a2751e5 100644 (file)
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
                if (of_phy_is_fixed_link(np)) {
                        int res = emac_dt_mdio_probe(dev);
 
-                       if (!res) {
-                               res = of_phy_register_fixed_link(np);
-                               if (res)
-                                       mdiobus_unregister(dev->mii_bus);
+                       if (res)
+                               return res;
+
+                       res = of_phy_register_fixed_link(np);
+                       dev->phy_dev = of_phy_find_device(np);
+                       if (res || !dev->phy_dev) {
+                               mdiobus_unregister(dev->mii_bus);
+                               return res ? res : -EINVAL;
                        }
-                       return res;
+                       emac_adjust_link(dev->ndev);
+                       put_device(&dev->phy_dev->mdio.dev);
                }
                return 0;
        }
index a903a0ba45e180c39e99c6b6a0726291203b1f91..7d42582ed48dc36f3da8b6b670d1c4f8847ffa9b 100644 (file)
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
 void fm10k_service_event_schedule(struct fm10k_intfc *interface);
 void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
 void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
 
 /* Netdev */
 struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
index 929f538d28bc03a391340ebf6cc531b03f92b7a3..538a8467f43413f97bbf477053ba8e93560e2bcf 100644 (file)
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_udp_tunnel_del     = fm10k_udp_tunnel_del,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = fm10k_netpoll,
-#endif
        .ndo_features_check     = fm10k_features_check,
 };
 
index 15071e4adb98c9924ab2e9a17c7f616f23104bd0..c859ababeed50e030baf7cb7041fbd20916c2265 100644 (file)
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- *  fm10k_netpoll - A Polling 'interrupt' handler
- *  @netdev: network interface device structure
- *
- *  This is used by netconsole to send skbs without having to re-enable
- *  interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__FM10K_DOWN, interface->state))
-               return;
-
-       for (i = 0; i < interface->num_q_vectors; i++)
-               fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
                               struct fm10k_fault *fault)
index 5906c1c1d19d82d7e37b0a891e457fea792b4153..fef6d892ed4cfe5ae293aa2fe01ef38a257c47bf 100644 (file)
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
        adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts.  It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
-       struct i40evf_adapter *adapter = netdev_priv(netdev);
-       int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
-               return;
-
-       for (i = 0; i < q_vectors; i++)
-               i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
 /**
  * i40evf_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
        .ndo_features_check     = i40evf_features_check,
        .ndo_fix_features       = i40evf_fix_features,
        .ndo_set_features       = i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = i40evf_netpoll,
-#endif
        .ndo_setup_tc           = i40evf_setup_tc,
 };
 
index f1e80eed2fd6d9f94eab163acb4d178d83ba4af2..3f047bb43348881c592adf3b236518ee21bc2fdd 100644 (file)
@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
        stats->rx_length_errors = vsi_stats->rx_length_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       struct ice_pf *pf = vsi->back;
-       int i;
-
-       if (test_bit(__ICE_DOWN, vsi->state) ||
-           !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-               return;
-
-       for (i = 0; i < vsi->num_q_vectors; i++)
-               ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
  * @vsi: VSI having NAPI disabled
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ice_change_mtu,
        .ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
        .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
        .ndo_set_features = ice_set_features,
index a32c576c1e656c0102989413cad114d1d8f03771..0796cef96fa335ee1907d9aa5d4d56f4fd366ca3 100644 (file)
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
        .priority       = 0
 };
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_set_vf_spoofchk    = igb_ndo_set_vf_spoofchk,
        .ndo_set_vf_trust       = igb_ndo_set_vf_trust,
        .ndo_get_vf_config      = igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = igb_netpoll,
-#endif
        .ndo_fix_features       = igb_fix_features,
        .ndo_set_features       = igb_set_features,
        .ndo_fdb_add            = igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
        return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       struct igb_q_vector *q_vector;
-       int i;
-
-       for (i = 0; i < adapter->num_q_vectors; i++) {
-               q_vector = adapter->q_vector[i];
-               if (adapter->flags & IGB_FLAG_HAS_MSIX)
-                       wr32(E1000_EIMC, q_vector->eims_value);
-               else
-                       igb_irq_disable(adapter);
-               napi_schedule(&q_vector->napi);
-       }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  *  igb_io_error_detected - called when PCI error is detected
  *  @pdev: Pointer to PCI device
index d3e72d0f66ef428b08e4bd88508e05b734bc43a4..7722153c4ac2fdff29c0afa0627ec26228557d69 100644 (file)
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
                              enum pci_channel_state state);
 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgb_netpoll,
-#endif
        .ndo_fix_features       = ixgb_fix_features,
        .ndo_set_features       = ixgb_set_features,
 };
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
                ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
-       struct ixgb_adapter *adapter = netdev_priv(dev);
-
-       disable_irq(adapter->pdev->irq);
-       ixgb_intr(adapter->pdev->irq, dev);
-       enable_irq(adapter->pdev->irq);
-}
-#endif
-
 /**
  * ixgb_io_error_detected - called when PCI error is detected
  * @pdev:    pointer to pci device with error
index 9a23d33a47ed52bfeb10d79d970e114ee4702d6e..f27d73a7bf16f084ea8f4ab12d905b031f514cf8 100644 (file)
@@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
        return err;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               return;
-
-       /* loop through and schedule all active queues */
-       for (i = 0; i < adapter->num_q_vectors; i++)
-               ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
                                   struct ixgbe_ring *ring)
 {
@@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
        .ndo_setup_tc           = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgbe_netpoll,
-#endif
 #ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
index d86446d202d5ed95826db225139b095d7c7a683c..5a228582423b74726de2bd040a388eb8e59df764 100644 (file)
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-               return;
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgbevf_netpoll,
-#endif
        .ndo_features_check     = ixgbevf_features_check,
        .ndo_bpf                = ixgbevf_xdp,
 };
index bc80a678abc30e22910400c5c39f847317aac8ec..b4ed7d394d079e0acb05a89d8866e8e97d0e252d 100644 (file)
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                if (!data || !(rx_desc->buf_phys_addr))
                        continue;
 
-               dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-                                MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+               dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(data);
        }
 }
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                skb_add_rx_frag(rxq->skb, frag_num, page,
                                                frag_offset, frag_size,
                                                PAGE_SIZE);
-                               dma_unmap_single(dev->dev.parent, phys_addr,
-                                                PAGE_SIZE, DMA_FROM_DEVICE);
+                               dma_unmap_page(dev->dev.parent, phys_addr,
+                                              PAGE_SIZE, DMA_FROM_DEVICE);
                                rxq->left_size -= frag_size;
                        }
                } else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                                frag_offset, frag_size,
                                                PAGE_SIZE);
 
-                               dma_unmap_single(dev->dev.parent, phys_addr,
-                                                PAGE_SIZE,
-                                                DMA_FROM_DEVICE);
+                               dma_unmap_page(dev->dev.parent, phys_addr,
+                                              PAGE_SIZE, DMA_FROM_DEVICE);
 
                                rxq->left_size -= frag_size;
                        }
index 702fec82d80636660881cfd285f014f3e0be0f1a..38cc01beea79ee53e8cd2970092f0575b1b1f374 100644 (file)
@@ -3055,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
                                   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
-       if (cause_tx) {
-               cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
-               mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+       if (port->has_tx_irqs) {
+               cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+               if (cause_tx) {
+                       cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+                       mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+               }
        }
 
        /* Process RX packets */
index 6785661d1a72627d7cc6895359e0ece284577d96..fe49384eba48cb3f75bd33f5bfb9cf1fa15af791 100644 (file)
@@ -1286,20 +1286,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
        mutex_unlock(&mdev->state_lock);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_cq *cq;
-       int i;
-
-       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
-               cq = priv->tx_cq[TX][i];
-               napi_schedule(&cq->napi);
-       }
-}
-#endif
-
 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
 {
        u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-#endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
        .ndo_get_vf_stats       = mlx4_en_get_vf_stats,
        .ndo_get_vf_config      = mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-#endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = __mlx4_en_setup_tc,
index 1f3372c1802ed0185cdb4f8ef309479549ceb3db..2df92dbd38e101aae78c99a6456aeefc76e4bb64 100644 (file)
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_eq *eq = &priv->eq_table.eq[vec];
 
-       if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+       if (!cpumask_available(eq->affinity_mask) ||
+           cpumask_empty(eq->affinity_mask))
                return;
 
        hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
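
The direct `!eq->affinity_mask` test only makes sense when CONFIG_CPUMASK_OFFSTACK=y turns cpumask_var_t into a pointer; with it disabled, the type is an embedded array and the test is meaningless. cpumask_available() is the accessor that is correct in both configurations. A sketch of the guarded pattern (the foo_ prefix is hypothetical):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void foo_set_affinity_hint(int irq, cpumask_var_t mask)
{
	/* not-yet-allocated (NULL) and empty masks are both no-ops */
	if (!cpumask_available(mask) || cpumask_empty(mask))
		return;

	if (irq_set_affinity_hint(irq, mask))
		pr_warn("foo: failed to set affinity hint for irq %d\n", irq);
}
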
index 3ce14d42ddc8984eecc2c19cb73f8d379b7f6498..a53736c26c0cec416b119878356e2beffe17b37e 100644 (file)
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
        u8 own;
 
        do {
-               own = ent->lay->status_own;
+               own = READ_ONCE(ent->lay->status_own);
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
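
Without READ_ONCE(), the compiler is free to hoist the load of status_own out of the loop and spin on a stale copy, since nothing in the loop body tells it that firmware may rewrite that byte. A self-contained sketch of the idiom (the struct layout and ownership-bit value are assumptions, not the real mlx5 definitions):

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define FOO_OWNER_HW	0x1	/* assumed hardware-ownership bit */

struct foo_ent {
	struct foo_layout {
		u8 status_own;	/* written back by the device */
	} *lay;
};

static int foo_poll_ownership(struct foo_ent *ent, unsigned long timeout)
{
	do {
		/* forces a fresh load from memory on every pass */
		u8 own = READ_ONCE(ent->lay->status_own);

		if (!(own & FOO_OWNER_HW))
			return 0;	/* device released the entry */

		cond_resched();
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}
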
index eddd7702680bce0284f8518e2f04920116a80e0b..e88340e196f79a4dca519f85ce174c54c144e5af 100644 (file)
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
 {
-       u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
        struct net_device *netdev = priv->netdev;
+       u32 caps;
 
        if (!mlx5_accel_is_tls_device(priv->mdev))
                return;
 
+       caps = mlx5_accel_tls_device_caps(priv->mdev);
        if (caps & MLX5_ACCEL_TLS_TX) {
                netdev->features          |= NETIF_F_HW_TLS_TX;
                netdev->hw_features       |= NETIF_F_HW_TLS_TX;
index 5a7939e7019026aa9c08d4fba759f95cb0a813c5..54118b77dc1f6d478c5b08e7c01526dd3e5dc740 100644 (file)
@@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_channels *chs = &priv->channels;
-
-       int i;
-
-       for (i = 0; i < chs->num; i++)
-               napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
 static const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
 #ifdef CONFIG_MLX5_EN_ARFS
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller     = mlx5e_netpoll,
-#endif
 #ifdef CONFIG_MLX5_ESWITCH
        /* SRIOV E-Switch NDOs */
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
index dae1c5c5d27c5abfeac951465f8fab240bd69ff0..d2f76070ea7ca87bcc98c1382cd95504e0a60104 100644 (file)
@@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
 
        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 
-       if (next_state == MLX5_RQC_STATE_RDY) {
+       if (next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
                MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
        }
index 930700413b1d07dd8a62ffbabf47aea995d062cf..b492152c8881bdd41a56f0a2b361a4b6bb86e40d 100644 (file)
@@ -44,8 +44,8 @@
 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
 #define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
index 26bb3b18f3be0f9f20149e5e689c561074c60a26..3cdf63e35b53bc1ea3e2f445e6a3746a56655bd3 100644 (file)
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
                struct sk_buff *skb;
                struct net_device *dev;
                u32 *buf;
-               int sz, len;
+               int sz, len, buf_len;
                u32 ifh[4];
                u32 val;
                struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
                        err = -ENOMEM;
                        break;
                }
-               buf = (u32 *)skb_put(skb, info.len);
+               buf_len = info.len - ETH_FCS_LEN;
+               buf = (u32 *)skb_put(skb, buf_len);
 
                len = 0;
                do {
                        sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
                        *buf++ = val;
                        len += sz;
-               } while ((sz == 4) && (len < info.len));
+               } while (len < buf_len);
+
+               /* Read the FCS and discard it */
+               sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+               /* Update the statistics if part of the FCS was read before */
+               len -= ETH_FCS_LEN - sz;
 
                if (sz < 0) {
                        err = sz;
index 253bdaef150557a7e20ded3de921021a051a9ca7..8ed38fd5a8520e0e125d96bc2ee7c53d891a5969 100644 (file)
@@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-       int i;
-
-       /* nfp_net's NAPIs are statically allocated so even if there is a race
-        * with reconfig path this will simply try to schedule some disabled
-        * NAPI instances.
-        */
-       for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
-               napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
 static void nfp_net_stat64(struct net_device *netdev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_get_stats64        = nfp_net_stat64,
        .ndo_vlan_rx_add_vid    = nfp_net_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = nfp_net_netpoll,
-#endif
        .ndo_set_vf_mac         = nfp_app_set_vf_mac,
        .ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
        .ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
index 6bb76e6d3c141c7fb5f4854dbd6cef514b090bf1..f5459de6d60a6abe4d2fec817b429dc5897d74ba 100644 (file)
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
-                   struct qed_hw_info *p_info,
-                   bool enable,
-                   u8 prio,
-                   u8 tc,
+                   struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                   bool enable, u8 prio, u8 tc,
                    enum dcbx_protocol_type type,
                    enum qed_pci_personality personality)
 {
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
        else
                p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
 
+       /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+       if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+            test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+               p_data->arr[type].dont_add_vlan0 = true;
+
        /* QM reconf data */
-       if (p_info->personality == personality)
-               qed_hw_info_set_offload_tc(p_info, tc);
+       if (p_hwfn->hw_info.personality == personality)
+               qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+       /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+       if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+           type == DCBX_PROTOCOL_ROCE) {
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+       }
 }
 
 /* Update app protocol data and hw_info fields with the TLV info */
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
-                        struct qed_hwfn *p_hwfn,
-                        bool enable,
-                        u8 prio, u8 tc, enum dcbx_protocol_type type)
+                        struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        bool enable, u8 prio, u8 tc,
+                        enum dcbx_protocol_type type)
 {
-       struct qed_hw_info *p_info = &p_hwfn->hw_info;
        enum qed_pci_personality personality;
        enum dcbx_protocol_type id;
        int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
                personality = qed_dcbx_app_update[i].personality;
 
-               qed_dcbx_set_params(p_data, p_info, enable,
+               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
                                    prio, tc, type, personality);
        }
 }
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
 static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                     struct qed_dcbx_results *p_data,
                     struct dcbx_app_priority_entry *p_tbl,
                     u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                                enable = true;
                        }
 
-                       qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
                                                 priority, tc, type);
                }
        }
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                        continue;
 
                enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-               qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
                                         priority, tc, type);
        }
 
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 /* Parse app TLV's to update TC information in hw_info structure for
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct dcbx_app_priority_feature *p_app;
        struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        p_info = &p_hwfn->hw_info;
        num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
 
-       rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+       rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
                                  num_entries, dcbx_version);
        if (rc)
                return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
                return rc;
 
        if (type == QED_DCBX_OPERATIONAL_MIB) {
-               rc = qed_dcbx_process_mib_info(p_hwfn);
+               rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
                if (!rc) {
                        /* reconfigure tcs of QM queues according
                         * to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
        p_data->dcb_enable_flag = p_src->arr[type].enable;
        p_data->dcb_priority = p_src->arr[type].priority;
        p_data->dcb_tc = p_src->arr[type].tc;
+       p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
 }
 
 /* Set pf update ramrod command params */
index a4d688c04e1827ab750bc6fec1d442ea2eca7840..01f253ea4b229201c727fd22284759aadf3bc59c 100644 (file)
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
        u8 update;              /* Update indication */
        u8 priority;            /* Priority */
        u8 tc;                  /* Traffic Class */
+       bool dont_add_vlan0;    /* Do not insert a vlan tag with id 0 */
 };
 
 #define QED_DCBX_VERSION_DISABLED       0
index 016ca8a7ec8aa11fe8fc3b035dadfd431ad8d4fc..97f073fd3725d7bbc8481a1f8cfd748087a55bef 100644 (file)
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
        struct qed_load_req_params load_req_params;
-       u32 load_code, param, drv_mb_param;
+       u32 load_code, resp, param, drv_mb_param;
        bool b_default_mtu = true;
        struct qed_hwfn *p_hwfn;
        int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 
        if (IS_PF(cdev)) {
                p_hwfn = QED_LEADING_HWFN(cdev);
+
+               /* Get pre-negotiated values for stag, bandwidth etc. */
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SPQ,
+                          "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+               drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_GET_OEM_UPDATES,
+                                drv_mb_param, &resp, &param);
+               if (rc)
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to send GET_OEM_UPDATES attention request\n");
+
                drv_mb_param = STORM_FW_VERSION;
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
index 8faceb691657ff26fb2b95d962e19d75efa867c5..9b3ef00e57824a5fd72e7ae06e3d1d44b59b176b 100644 (file)
@@ -12414,6 +12414,7 @@ struct public_drv_mb {
 #define DRV_MSG_SET_RESOURCE_VALUE_MSG         0x35000000
 #define DRV_MSG_CODE_OV_UPDATE_WOL              0x38000000
 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE     0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES            0x41000000
 
 #define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
@@ -12541,6 +12542,9 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_ESWITCH_MODE_VEB  0x1
 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
 
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK    0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET  0
+
 #define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
 #define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
index 5d37ec7e9b0b7b2bc3785ec00ce2e23314b7c955..58c7eb9d8e1b85893ea33c7de48426e5f555bdc5 100644 (file)
@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
                                                 FUNC_MF_CFG_OV_STAG_MASK;
        p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
-       if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
-           (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
-               qed_wr(p_hwfn, p_ptt,
-                      NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+       if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+               if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+                              p_hwfn->hw_info.ovlan);
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+                       /* Configure DB to add external vlan to EDPM packets */
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+                              p_hwfn->hw_info.ovlan);
+               } else {
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+               }
+
                qed_sp_pf_update_stag(p_hwfn);
        }
 
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan  = %d hw_mode = 0x%x\n",
+                  p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
        /* Acknowledge the MFW */
        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
                    &resp, &param);
index f736f70956fd3763f3f94bffa0e009dec28b9516..2440970882c48766c9869a57084826a31a8ffcfe 100644 (file)
        0x00c000UL
 #define  DORQ_REG_IFEN \
        0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+       0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+       0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+       0x1008c8UL
 #define DORQ_REG_DB_DROP_REASON \
        0x100a2cUL
 #define DORQ_REG_DB_DROP_DETAILS \
index bb529ff2ca818c9218f3a0ecc52b2f5fdd6af488..ab30aaeac6d377e6303fafc57f820e3022ae7773 100644 (file)
@@ -4071,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
        phy_speed_up(dev->phydev);
 
        genphy_soft_reset(dev->phydev);
+
+       /* It was reported that chip version 33 ends up with 10MBit/Half on a
+        * 1GBit link after resuming from S3. For whatever reason the PHY on
+        * this chip doesn't properly start a renegotiation when soft-reset.
+        * Explicitly requesting a renegotiation fixes this.
+        */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
+           dev->phydev->autoneg == AUTONEG_ENABLE)
+               phy_restart_aneg(dev->phydev);
 }
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
index 1470fc12282b255181838457ff6a07a362e3e321..9b6bf557a2f5ffde5fe405f17e3307014f0c6a05 100644 (file)
@@ -428,6 +428,7 @@ enum EIS_BIT {
        EIS_CULF1       = 0x00000080,
        EIS_TFFF        = 0x00000100,
        EIS_QFS         = 0x00010000,
+       EIS_RESERVED    = (GENMASK(31, 17) | GENMASK(15, 11)),
 };
 
 /* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
        RIS0_FRF15      = 0x00008000,
        RIS0_FRF16      = 0x00010000,
        RIS0_FRF17      = 0x00020000,
+       RIS0_RESERVED   = GENMASK(31, 18),
 };
 
 /* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
        RIS2_QFF16      = 0x00010000,
        RIS2_QFF17      = 0x00020000,
        RIS2_RFFF       = 0x80000000,
+       RIS2_RESERVED   = GENMASK(30, 18),
 };
 
 /* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
        TIS_FTF1        = 0x00000002,   /* Undocumented? */
        TIS_TFUF        = 0x00000100,
        TIS_TFWF        = 0x00000200,
+       TIS_RESERVED    = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
 };
 
 /* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
 enum GIS_BIT {
        GIS_PTCF        = 0x00000001,   /* Undocumented? */
        GIS_PTMF        = 0x00000004,
+       GIS_RESERVED    = GENMASK(15, 10),
 };
 
 /* GIE (R-Car Gen3 only) */
index aff5516b781e27067efc9c1cca19f11bea19dd19..d6f753925352d41758945aca8d5ef6340cceee9a 100644 (file)
@@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
        u32 eis, ris2;
 
        eis = ravb_read(ndev, EIS);
-       ravb_write(ndev, ~EIS_QFS, EIS);
+       ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
-               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+                          RIS2);
 
                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
@@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
        u32 tis = ravb_read(ndev, TIS);
 
        if (tis & TIS_TFUF) {
-               ravb_write(ndev, ~TIS_TFUF, TIS);
+               ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
                ravb_get_tx_tstamp(ndev);
                return true;
        }
@@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                /* Processing RX Descriptor Ring */
                if (ris0 & mask) {
                        /* Clear RX interrupt */
-                       ravb_write(ndev, ~mask, RIS0);
+                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
                        if (ravb_rx(ndev, &quota, q))
                                goto out;
                }
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                if (tis & mask) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
-                       ravb_write(ndev, ~mask, TIS);
+                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
                        ravb_tx_free(ndev, q, true);
                        netif_wake_subqueue(ndev, q);
                        mmiowb();
index 0721b5c35d91d613256159763c7e4430247da3af..dce2a40a31e336d6a45b8e393a384787743859f9 100644 (file)
@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
                }
        }
 
-       ravb_write(ndev, ~gis, GIS);
+       ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
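
In all of these ravb hunks the acknowledge idiom is write(~bits): writing 0 clears a status bit while writing 1 leaves it alone, so the complement of the bits to clear is stored back. As I read the change, the hardware also wants reserved positions written as 0, which a bare ~bit would violate; folding a RESERVED mask into the complement fixes that. An illustrative register, layout invented for the sketch:

#include <linux/bits.h>
#include <linux/io.h>

#define FOO_DONE	BIT(8)				/* status to ack */
#define FOO_RESERVED	(GENMASK(31, 20) | GENMASK(15, 12))

static void foo_ack_done(void __iomem *reg)
{
	/* 0 = clear (and keep reserved bits low), 1 = leave set */
	writel(~(FOO_DONE | FOO_RESERVED), reg);
}
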
index c5bc124b41a93a69cffccd7ca44602292309686c..d1bb73bf99148b2cd29aff4e8c2f943369b0ab4c 100644 (file)
@@ -77,7 +77,8 @@ static void   ether3_setmulticastlist(struct net_device *dev);
 static int     ether3_rx(struct net_device *dev, unsigned int maxcnt);
 static void    ether3_tx(struct net_device *dev);
 static int     ether3_open (struct net_device *dev);
-static int     ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t     ether3_sendpacket(struct sk_buff *skb,
+                                         struct net_device *dev);
 static irqreturn_t ether3_interrupt (int irq, void *dev_id);
 static int     ether3_close (struct net_device *dev);
 static void    ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
 /*
  * Transmit a packet
  */
-static int
+static netdev_tx_t
 ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned long flags;
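
This and the following hunks (sgiseeq, ioc3, meth, and later w5100/w5300) are the same prototype fix: .ndo_start_xmit must return netdev_tx_t rather than int. The conversions here only touch signatures; for reference, a conforming handler has roughly this shape (foo_* helpers hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!foo_tx_ring_has_room(priv)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core will retry the skb later */
	}

	foo_queue_skb(priv, skb);
	return NETDEV_TX_OK;
}
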
index 573691bc3b71fb31390dc33c071599a2e83c0ad9..70cce63a6081698169f5737d7eda9c4d3fc069fe 100644 (file)
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
        return 0;
 }
 
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
index 18d533fdf14c491a1afa48ea83a27103ac340caf..3140999642ba93dbcfe9609783fe18963e93088b 100644 (file)
@@ -99,7 +99,7 @@ struct ioc3_private {
 
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void ioc3_timeout(struct net_device *dev);
 static inline unsigned int ioc3_hash(const unsigned char *addr);
 static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
        .remove         = ioc3_remove_one,
 };
 
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned long data;
        struct ioc3_private *ip = netdev_priv(dev);
index ea55abd62ec7094f4be4182f0602abe6b7e5e466..703fbbefea44d1c830b4f4244fd416ec504f88d2 100644 (file)
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
 /*
  * Transmit a packet (called by the kernel)
  */
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;
index 1854f270ad6611a57d381f48bdf0956caf3fdad9..b1b305f8f4143626fc664445182c5e2afc38b87c 100644 (file)
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
 #define MAX_DMA_RIWT           0xff
 #define MIN_DMA_RIWT           0x20
 /* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER   40000
+#define STMMAC_COAL_TX_TIMER   1000
 #define STMMAC_MAX_COAL_TX_TICK        100000
 #define STMMAC_TX_MAX_FRAMES   256
-#define STMMAC_TX_FRAMES       64
+#define STMMAC_TX_FRAMES       25
 
 /* Packets types */
 enum packets_types {
index c0a855b7ab3b4304a0ba734117ee63d903701df8..63e1064b27a245435255c2de9e1c934ccbef8ffb 100644 (file)
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
 
 /* Frequently used values are kept adjacent for cache effect */
 struct stmmac_tx_queue {
+       u32 tx_count_frames;
+       struct timer_list txtimer;
        u32 queue_index;
        struct stmmac_priv *priv_data;
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
        u32 rx_zeroc_thresh;
        dma_addr_t dma_rx_phy;
        u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
        struct napi_struct napi ____cacheline_aligned_in_smp;
+       struct stmmac_priv *priv_data;
+       u32 index;
+       int has_rx;
+       int has_tx;
 };
 
 struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
 
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
-       u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
 
        int tx_coalesce;
        int hwts_tx_en;
        bool tx_path_in_lpi_mode;
-       struct timer_list txtimer;
        bool tso;
 
        unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
        /* TX Queue */
        struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
 
+       /* Generic channel for NAPI */
+       struct stmmac_channel channel[STMMAC_CH_MAX];
+
        bool oldlink;
        int speed;
        int oldduplex;
index 9f458bb16f2a6edb6ab8cca33c00176b1cce1d67..75896d6ba6e2b8e5990df01f7c3ff9d7abe96137 100644 (file)
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;
 
-       for (queue = 0; queue < rx_queues_cnt; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               napi_disable(&rx_q->napi);
+               napi_disable(&ch->napi);
        }
 }
 
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 {
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;
 
-       for (queue = 0; queue < rx_queues_cnt; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               napi_enable(&rx_q->napi);
+               napi_enable(&ch->napi);
        }
 }
 
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
-       unsigned int entry;
+       unsigned int entry, count = 0;
 
-       netif_tx_lock(priv->dev);
+       __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
        priv->xstats.tx_clean++;
 
        entry = tx_q->dirty_tx;
-       while (entry != tx_q->cur_tx) {
+       while ((entry != tx_q->cur_tx) && (count < budget)) {
                struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
                int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                if (unlikely(status & tx_dma_own))
                        break;
 
+               count++;
+
                /* Make sure descriptor fields are read after reading
                 * the own bit.
                 */
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                stmmac_enable_eee_mode(priv);
                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
        }
-       netif_tx_unlock(priv->dev);
+
+       __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+       return count;
 }
 
 /**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
        return false;
 }
 
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+       int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+                                                &priv->xstats, chan);
+       struct stmmac_channel *ch = &priv->channel[chan];
+       bool needs_work = false;
+
+       if ((status & handle_rx) && ch->has_rx) {
+               needs_work = true;
+       } else {
+               status &= ~handle_rx;
+       }
+
+       if ((status & handle_tx) && ch->has_tx) {
+               needs_work = true;
+       } else {
+               status &= ~handle_tx;
+       }
+
+       if (needs_work && napi_schedule_prep(&ch->napi)) {
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+               __napi_schedule(&ch->napi);
+       }
+
+       return status;
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
        u32 channels_to_check = tx_channel_count > rx_channel_count ?
                                tx_channel_count : rx_channel_count;
        u32 chan;
-       bool poll_scheduled = false;
        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
 
        /* Make sure we never check beyond our status buffer. */
        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
                channels_to_check = ARRAY_SIZE(status);
 
-       /* Each DMA channel can be used for rx and tx simultaneously, yet
-        * napi_struct is embedded in struct stmmac_rx_queue rather than in a
-        * stmmac_channel struct.
-        * Because of this, stmmac_poll currently checks (and possibly wakes)
-        * all tx queues rather than just a single tx queue.
-        */
        for (chan = 0; chan < channels_to_check; chan++)
-               status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
-                               &priv->xstats, chan);
-
-       for (chan = 0; chan < rx_channel_count; chan++) {
-               if (likely(status[chan] & handle_rx)) {
-                       struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
-                       if (likely(napi_schedule_prep(&rx_q->napi))) {
-                               stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-                               __napi_schedule(&rx_q->napi);
-                               poll_scheduled = true;
-                       }
-               }
-       }
-
-       /* If we scheduled poll, we already know that tx queues will be checked.
-        * If we didn't schedule poll, see if any DMA channel (used by tx) has a
-        * completed transmission, if so, call stmmac_poll (once).
-        */
-       if (!poll_scheduled) {
-               for (chan = 0; chan < tx_channel_count; chan++) {
-                       if (status[chan] & handle_tx) {
-                               /* It doesn't matter what rx queue we choose
-                                * here. We use 0 since it always exists.
-                                */
-                               struct stmmac_rx_queue *rx_q =
-                                       &priv->rx_queue[0];
-
-                               if (likely(napi_schedule_prep(&rx_q->napi))) {
-                                       stmmac_disable_dma_irq(priv,
-                                                       priv->ioaddr, chan);
-                                       __napi_schedule(&rx_q->napi);
-                               }
-                               break;
-                       }
-               }
-       }
+               status[chan] = stmmac_napi_check(priv, chan);
 
        for (chan = 0; chan < tx_channel_count; chan++) {
                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                                    tx_q->dma_tx_phy, chan);
 
-               tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-                           (DMA_TX_SIZE * sizeof(struct dma_desc));
+               tx_q->tx_tail_addr = tx_q->dma_tx_phy;
                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
                                       tx_q->tx_tail_addr, chan);
        }
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
        return ret;
 }
 
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+       mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
  * @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  */
 static void stmmac_tx_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = from_timer(priv, t, txtimer);
-       u32 tx_queues_count = priv->plat->tx_queues_to_use;
-       u32 queue;
+       struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+       struct stmmac_priv *priv = tx_q->priv_data;
+       struct stmmac_channel *ch;
+
+       ch = &priv->channel[tx_q->queue_index];
 
-       /* let's scan all the tx queues */
-       for (queue = 0; queue < tx_queues_count; queue++)
-               stmmac_tx_clean(priv, queue);
+       if (likely(napi_schedule_prep(&ch->napi)))
+               __napi_schedule(&ch->napi);
 }
 
 /**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
  */
 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
+       u32 tx_channel_count = priv->plat->tx_queues_to_use;
+       u32 chan;
+
        priv->tx_coal_frames = STMMAC_TX_FRAMES;
        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-       timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
-       priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
-       add_timer(&priv->txtimer);
+
+       for (chan = 0; chan < tx_channel_count; chan++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+               timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+       }
 }
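
With the coalescing timer moved from stmmac_priv into each TX queue, from_timer() now resolves to the queue that embeds the timer, and firing it just kicks that channel's NAPI instead of scanning every queue under a lock. A reduced sketch of the wiring (foo_* names hypothetical; from_timer()/timer_setup() are the real post-4.15 timer API):

#include <linux/timer.h>
#include <linux/netdevice.h>

struct foo_txq {
	struct timer_list txtimer;
	struct napi_struct *napi;	/* channel NAPI serving this queue */
};

static void foo_tx_timer(struct timer_list *t)
{
	struct foo_txq *txq = from_timer(txq, t, txtimer);

	napi_schedule(txq->napi);	/* defer TX cleanup to NAPI context */
}

static void foo_txq_timer_init(struct foo_txq *txq, struct napi_struct *napi)
{
	txq->napi = napi;
	timer_setup(&txq->txtimer, foo_tx_timer, 0);
	/* re-armed per packet with mod_timer(&txq->txtimer, jiffies + delay) */
}
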
 
 static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 chan;
        int ret;
 
        stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ static int stmmac_open(struct net_device *dev)
        if (dev->phydev)
                phy_stop(dev->phydev);
 
-       del_timer_sync(&priv->txtimer);
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+               del_timer_sync(&priv->tx_queue[chan].txtimer);
+
        stmmac_hw_teardown(dev);
 init_error:
        free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ static int stmmac_open(struct net_device *dev)
 static int stmmac_release(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 chan;
 
        if (priv->eee_enabled)
                del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
 
        stmmac_disable_all_queues(priv);
 
-       del_timer_sync(&priv->txtimer);
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+               del_timer_sync(&priv->tx_queue[chan].txtimer);
 
        /* Free the IRQ lines */
        free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->xstats.tx_tso_nfrags += nfrags;
 
        /* Manage tx mitigation */
-       priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-               mod_timer(&priv->txtimer,
-                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
-       } else {
-               priv->tx_count_frames = 0;
+       tx_q->tx_count_frames += nfrags + 1;
+       if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
+               tx_q->tx_count_frames = 0;
+       } else {
+               stmmac_tx_timer_arm(priv, queue);
        }
 
        skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
+       tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
        return NETDEV_TX_OK;
@@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * This approach takes care about the fragments: desc is the first
         * element in case of no SG.
         */
-       priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-               mod_timer(&priv->txtimer,
-                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
-       } else {
-               priv->tx_count_frames = 0;
+       tx_q->tx_count_frames += nfrags + 1;
+       if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
+               tx_q->tx_count_frames = 0;
+       } else {
+               stmmac_tx_timer_arm(priv, queue);
        }
 
        skb_tx_timestamp(skb);
@@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+       tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
        return NETDEV_TX_OK;
@@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct stmmac_channel *ch = &priv->channel[queue];
        unsigned int entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
        unsigned int next_entry;
@@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        else
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-                       napi_gro_receive(&rx_q->napi, skb);
+                       napi_gro_receive(&ch->napi, skb);
 
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
@@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  *  Description :
  *  To look at the incoming frames and clear the tx resources.
  */
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 {
-       struct stmmac_rx_queue *rx_q =
-               container_of(napi, struct stmmac_rx_queue, napi);
-       struct stmmac_priv *priv = rx_q->priv_data;
-       u32 tx_count = priv->plat->tx_queues_to_use;
-       u32 chan = rx_q->queue_index;
-       int work_done = 0;
-       u32 queue;
+       struct stmmac_channel *ch =
+               container_of(napi, struct stmmac_channel, napi);
+       struct stmmac_priv *priv = ch->priv_data;
+       int work_done = 0, work_rem = budget;
+       u32 chan = ch->index;
 
        priv->xstats.napi_poll++;
 
-       /* check all the queues */
-       for (queue = 0; queue < tx_count; queue++)
-               stmmac_tx_clean(priv, queue);
+       if (ch->has_tx) {
+               int done = stmmac_tx_clean(priv, work_rem, chan);
 
-       work_done = stmmac_rx(priv, budget, rx_q->queue_index);
-       if (work_done < budget) {
-               napi_complete_done(napi, work_done);
-               stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+               work_done += done;
+               work_rem -= done;
+       }
+
+       if (ch->has_rx) {
+               int done = stmmac_rx(priv, work_rem, chan);
+
+               work_done += done;
+               work_rem -= done;
        }
+
+       if (work_done < budget && napi_complete_done(napi, work_done))
+               stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
        return work_done;
 }
 
@@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
 {
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
+       u32 queue, maxq;
        int ret = 0;
-       u32 queue;
 
        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
                                  MTL_MAX_TX_QUEUES,
@@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }
 
-       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       /* Setup channels NAPI */
+       maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
-               netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
-                              (8 * priv->plat->rx_queues_to_use));
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
+
+               ch->priv_data = priv;
+               ch->index = queue;
+
+               if (queue < priv->plat->rx_queues_to_use)
+                       ch->has_rx = true;
+               if (queue < priv->plat->tx_queues_to_use)
+                       ch->has_tx = true;
+
+               netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+                              NAPI_POLL_WEIGHT);
        }
 
        mutex_init(&priv->lock);
@@ -4372,10 +4402,10 @@ int stmmac_dvr_probe(struct device *device,
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
 error_mdio_register:
-       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               netif_napi_del(&rx_q->napi);
+               netif_napi_del(&ch->napi);
        }
 error_hw_init:
        destroy_workqueue(priv->wq);
index 2bdfb39215e9ce749ecc5fac8f4a22226c081bc4..d8ba512f166ad38f10f6a01c1ef10c564bdd808e 100644 (file)
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
        w5100_tx_skb(priv->ndev, skb);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
 
index 56ae573001e8e76c8cbd5300ad4fca4873cbc2ae..80fdbff67d82a7519a3a2eff210cb71c029ee126 100644 (file)
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
        netif_wake_queue(ndev);
 }
 
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct w5300_priv *priv = netdev_priv(ndev);
 
index 740655261e5b7347116d2a5b53445c8d023cb49c..83060fb349f4d5d458e762eb540afe0de2b935d6 100644 (file)
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
        }
        if (bus->started)
                bus->socket_ops->start(bus->sfp);
+       bus->netdev->sfp_bus = bus;
        bus->registered = true;
        return 0;
 }
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 {
        const struct sfp_upstream_ops *ops = bus->upstream_ops;
 
+       bus->netdev->sfp_bus = NULL;
        if (bus->registered) {
                if (bus->started)
                        bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
 {
        bus->upstream_ops = NULL;
        bus->upstream = NULL;
-       bus->netdev->sfp_bus = NULL;
        bus->netdev = NULL;
 }
 
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
                bus->upstream_ops = ops;
                bus->upstream = upstream;
                bus->netdev = ndev;
-               ndev->sfp_bus = bus;
 
                if (bus->sfp) {
                        ret = sfp_register_bus(bus);
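
The net effect of these three sfp-bus hunks appears to be an ordering fix: the netdev's sfp_bus back-pointer is now published as the final step of a successful registration and retracted before teardown begins, so any code observing it non-NULL sees a fully set-up bus. The generic publish/retract shape, with invented names:

struct foo_owner {
	struct foo_bus *bus;	/* what outside observers read */
};

static int foo_attach(struct foo_owner *owner, struct foo_bus *bus)
{
	int err = foo_setup(bus);

	if (err)
		return err;

	owner->bus = bus;	/* publish only after setup succeeded */
	return 0;
}

static void foo_detach(struct foo_owner *owner, struct foo_bus *bus)
{
	owner->bus = NULL;	/* retract before any teardown */
	foo_teardown(bus);
}
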
index ebd07ad82431ec78c3801999fdc986188216655a..e2648b5a3861e51dc6c40d19e1198a5f3f7ca7af 100644 (file)
@@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
 
        return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
-       /*
-        * Tun only receives frames when:
-        * 1) the char device endpoint gets data from user space
-        * 2) the tun socket gets a sendmsg call from user space
-        * If NAPI is not enabled, since both of those are synchronous
-        * operations, we are guaranteed never to have pending data when we poll
-        * for it so there is nothing to do here but return.
-        * We need this though so netpoll recognizes us as an interface that
-        * supports polling, which enables bridge devices in virt setups to
-        * still use netconsole
-        * If NAPI is enabled, however, we need to schedule polling for all
-        * queues unless we are using napi_gro_frags(), which we call in
-        * process context and not in NAPI context.
-        */
-       struct tun_struct *tun = netdev_priv(dev);
-
-       if (tun->flags & IFF_NAPI) {
-               struct tun_file *tfile;
-               int i;
-
-               if (tun_napi_frags_enabled(tun))
-                       return;
-
-               rcu_read_lock();
-               for (i = 0; i < tun->numqueues; i++) {
-                       tfile = rcu_dereference(tun->tfiles[i]);
-                       if (tfile->napi_enabled)
-                               napi_schedule(&tfile->napi);
-               }
-               rcu_read_unlock();
-       }
-       return;
-}
-#endif
 
 static void tun_set_headroom(struct net_device *dev, int new_hr)
 {
@@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = {
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_fix_features       = tun_net_fix_features,
        .ndo_select_queue       = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = tun_poll_controller,
-#endif
        .ndo_set_rx_headroom    = tun_set_headroom,
        .ndo_get_stats64        = tun_net_get_stats64,
 };
@@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_select_queue       = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = tun_poll_controller,
-#endif
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = tun_set_headroom,
        .ndo_get_stats64        = tun_net_get_stats64,
index 5a9562881d4ef87ddfbd749555f1c8eccf01acc5..9fe3fff818b8a42281b30bcd3bba83c0e0dd36f8 100644 (file)
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
        INIT_WORK(&ctrl->ana_work, nvme_ana_work);
        ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-       if (!ctrl->ana_log_buf)
+       if (!ctrl->ana_log_buf) {
+               error = -ENOMEM;
                goto out;
+       }
 
        error = nvme_read_ana_log(ctrl, true);
        if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
        kfree(ctrl->ana_log_buf);
 out:
-       return -ENOMEM;
+       return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
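
The bug here is a classic goto-ladder slip: the out label unconditionally returned -ENOMEM, clobbering whatever nvme_read_ana_log() actually failed with. The corrected shape, reduced to a sketch (struct and foo_read_log() are hypothetical stand-ins):

#include <linux/slab.h>

struct foo_ctrl {
	void *log_buf;
	size_t log_size;
};

static int foo_init(struct foo_ctrl *ctrl)
{
	int error;

	ctrl->log_buf = kmalloc(ctrl->log_size, GFP_KERNEL);
	if (!ctrl->log_buf) {
		error = -ENOMEM;	/* set explicitly before jumping */
		goto out;
	}

	error = foo_read_log(ctrl);	/* may fail with something other than -ENOMEM */
	if (error)
		goto out_free;

	return 0;

out_free:
	kfree(ctrl->log_buf);
out:
	return error;			/* the real cause, not a fixed -ENOMEM */
}
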
index 778c4f76a884320b0fad349260251acf796462a2..2153956a0b207cae268ffabf8392a3025f22432b 100644 (file)
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
index 96126fd8403ccd79450936b7d4f52de551d07c40..9f1a5e399b7033eba9918a7981bdfdb0ea957a7a 100644 (file)
@@ -26,8 +26,7 @@
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES     5
-#define LINK_WAIT_IATU_MIN             9000
-#define LINK_WAIT_IATU_MAX             10000
+#define LINK_WAIT_IATU                 9
 
 /* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL         0x710
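
usleep_range() can schedule, and these iATU poll loops can run in atomic context (config accesses are made with a raw spinlock held, as I understand the rationale), hence the switch to a 9 ms mdelay() busy-wait per retry. The resulting loop, sketched with the constants from this hunk; the register offset and enable bit are passed in rather than asserted:

#include <linux/delay.h>
#include <linux/io.h>

static int foo_wait_iatu_enabled(void __iomem *base, u32 ctrl2_off,
				 u32 enable_bit)
{
	int retries;

	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		if (readl(base + ctrl2_off) & enable_bit)
			return 0;

		mdelay(LINK_WAIT_IATU);	/* busy-wait: legal in atomic context */
	}
	return -ETIMEDOUT;
}
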
index ee80e79db21a24907ccc96c654b0e253bdeec399..9ba4d12c179c7551d8a10b59433a33ed5be044cd 100644 (file)
@@ -1484,8 +1484,10 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
                snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
                hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
                                          name, NULL);
-               if (!hpdev->pci_slot)
+               if (IS_ERR(hpdev->pci_slot)) {
                        pr_warn("pci_create slot %s failed\n", name);
+                       hpdev->pci_slot = NULL;
+               }
        }
 }
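
pci_create_slot() reports failure with ERR_PTR() codes, never NULL, so the old `!hpdev->pci_slot` test could never fire and left an error pointer behind to be dereferenced later. The corrected check, plus the sentinel reset, in isolation:

#include <linux/err.h>
#include <linux/pci.h>

static void foo_assign_slot(struct pci_bus *bus, int slot_nr,
			    const char *name, struct pci_slot **out)
{
	struct pci_slot *slot = pci_create_slot(bus, slot_nr, name, NULL);

	if (IS_ERR(slot)) {
		pr_warn("pci_create_slot %s failed: %ld\n",
			name, PTR_ERR(slot));
		slot = NULL;	/* safe value for later NULL checks */
	}
	*out = slot;
}
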
 
index ef0b1b6ba86f8fad2a570187252e7579e12ea129..12afa7fdf77e9569d78f517a77b01de129151937 100644 (file)
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
 /**
  * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
  *
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
 {
        struct pci_dev *dev;
        struct pci_bus *bus = slot->bus;
        struct acpiphp_func *func;
 
-       if (bus->self && hotplug_is_native(bus->self)) {
+       if (bridge && bus->self && hotplug_is_native(bus->self)) {
                /*
                 * If native hotplug is used, it will take care of hotplug
                 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                                        trim_stale_devices(dev);
 
                        /* configure all functions */
-                       enable_slot(slot);
+                       enable_slot(slot, true);
                } else {
                        disable_slot(slot);
                }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
                if (bridge)
                        acpiphp_check_bridge(bridge);
                else if (!(slot->flags & SLOT_IS_GOING_AWAY))
-                       enable_slot(slot);
+                       enable_slot(slot, false);
 
                break;
 
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
-               enable_slot(slot);
+               enable_slot(slot, false);
 
        pci_unlock_rescan_remove();
        return 0;
index 8d48371caaa2df51568fd70996e2b4594a24a311..e7f45d96b0cbd61e4cf7adfb050fbde547bcf752 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define CNL_PAD_OWN    0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE     0x120
+#define CNL_PAD_OWN            0x020
+#define CNL_PADCFGLOCK         0x080
+#define CNL_LP_HOSTSW_OWN      0x0b0
+#define CNL_H_HOSTSW_OWN       0x0c0
+#define CNL_GPI_IE             0x120
 
 #define CNL_GPP(r, s, e, g)                            \
        {                                               \
 
 #define CNL_NO_GPIO    -1
 
-#define CNL_COMMUNITY(b, s, e, g)                      \
+#define CNL_COMMUNITY(b, s, e, o, g)                   \
        {                                               \
                .barno = (b),                           \
                .padown_offset = CNL_PAD_OWN,           \
                .padcfglock_offset = CNL_PADCFGLOCK,    \
-               .hostown_offset = CNL_HOSTSW_OWN,       \
+               .hostown_offset = (o),                  \
                .ie_offset = CNL_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
                .ngpps = ARRAY_SIZE(g),                 \
        }
 
+#define CNLLP_COMMUNITY(b, s, e, g)                    \
+       CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g)                     \
+       CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
 /* Cannon Lake-H */
 static const struct pinctrl_pin_desc cnlh_pins[] = {
        /* GPP_A */
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
 };
 
 static const struct intel_community cnlh_communities[] = {
-       CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
-       CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
-       CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
-       CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+       CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+       CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+       CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+       CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
 };
 
 static const struct intel_community cnllp_communities[] = {
-       CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
-       CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
-       CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+       CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+       CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+       CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnllp_soc_data = {
index ec8dafc946943261bbefa67d948a03a6cce9fffa..1ea3438ea67e925aa82b6e57e503efb72689fde3 100644 (file)
@@ -887,36 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
        .set_config = gpiochip_generic_config,
 };
 
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-       int ret;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0) {
-               ret = gpiochip_lock_as_irq(gc, pin);
-               if (ret) {
-                       dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
-                               pin);
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0)
-               gpiochip_unlock_as_irq(gc, pin);
-}
-
 static void intel_gpio_irq_ack(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1132,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
        .name = "intel-gpio",
-       .irq_request_resources = intel_gpio_irq_reqres,
-       .irq_release_resources = intel_gpio_irq_relres,
        .irq_enable = intel_gpio_irq_enable,
        .irq_ack = intel_gpio_irq_ack,
        .irq_mask = intel_gpio_irq_mask,
index 41ccc759b8b8867a09b992371eafafca35899a0e..1425c2874d4028b5140cc74933733a2bf4f8f22b 100644 (file)
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
        pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
        pin_reg |= BIT(INTERRUPT_MASK_OFF);
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-       /*
-        * When debounce logic is enabled it takes ~900 us before interrupts
-        * can be enabled.  During this "debounce warm up" period the
-        * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-        * reads back as 1, signaling that interrupts are now enabled.
-        */
-       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-               continue;
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
-       u32 pin_reg;
+       u32 pin_reg, pin_reg_irq_en, mask;
        unsigned long flags, irq_flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
 
        pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+       /*
+        * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+        * debounce registers of any GPIO will block wake/interrupt status
+        * generation for *all* GPIOs for a length of time that depends on
+        * WAKE_INT_MASTER_REG.MaskStsLength[11:0].  During this period the
+        * INTERRUPT_ENABLE bit will read as 0.
+        *
+        * We temporarily enable the irq for the GPIO whose configuration is
+        * changing, wait for INTERRUPT_ENABLE to read back as 1 to know when
+        * debounce has settled, and then disable the irq again.
+        * We do this polling with the spinlock held to ensure other GPIO
+        * access routines do not read an incorrect value for the irq enable
+        * bit of other GPIOs, and we keep the GPIO masked while polling to
+        * avoid spurious irqs.
+        */
+       mask = BIT(INTERRUPT_ENABLE_OFF);
+       pin_reg_irq_en = pin_reg;
+       pin_reg_irq_en |= mask;
+       pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+       writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+               continue;
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
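The open-coded poll above runs under a raw spinlock, so it must not sleep. For illustration only, the same wait expressed with the atomic iopoll helper, which is udelay-based and also bounds the wait with a timeout; the register argument and timings here are illustrative, not taken from the patch:

#include <linux/iopoll.h>

/* Sketch: poll until all bits in mask read back as set; safe under a
 * raw spinlock, and gives up after 2 ms instead of spinning forever.
 */
static int example_wait_irq_enabled(void __iomem *reg, u32 mask)
{
	u32 val;

	/* poll every 10 us, time out after 2000 us */
	return readl_poll_timeout_atomic(reg, val, (val & mask) == mask,
					 10, 2000);
}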
index 0f8ac8dec3e16a21e066e9f76318d14ec6c9d78f..a1bd8aaf4d983bcd8b8bdfa10ae944918fabeb75 100644 (file)
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
                        BD71837_REG_REGLOCK);
        }
 
+       /*
+        * There is a HW quirk in BD71837. The shutdown sequence timings for
+        * bucks/LDOs which are controlled via register interface are changed.
+        * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
+        * beginning of shut-down sequence. As bucks 6 and 7 are parent
+        * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage
+        * monitoring to erroneously detect under-voltage and force the PMIC
+        * into the emergency state instead of poweroff. In order to avoid
+        * this we disable voltage monitoring for LDO5 and LDO6.
+        */
+       err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
+       if (err) {
+               dev_err(&pmic->pdev->dev,
+                       "Failed to disable voltage monitoring\n");
+               goto err;
+       }
+
        for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
 
                struct regulator_desc *desc;
index bb1324f93143f66e609fea2602329230f70ed4ce..9577d89418468a06f1030ff69d2699f71b5710bc 100644 (file)
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
        if (!rstate->changeable)
                return -EPERM;
 
-       rstate->enabled = en;
+       rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
 
        return 0;
 }
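The explicit mapping above matters because suspend_state->enabled is no longer a bool: later in this series the constants in include/linux/regulator/machine.h are renumbered so that DO_NOTHING_IN_SUSPEND is 0, DISABLE_IN_SUSPEND is 1 and ENABLE_IN_SUSPEND is 2. A sketch of why storing the raw bool would misbehave under the new encoding (function name is illustrative):

#include <linux/types.h>

#define DO_NOTHING_IN_SUSPEND	0
#define DISABLE_IN_SUSPEND	1
#define ENABLE_IN_SUSPEND	2

static int example_map_suspend(bool en)
{
	/* "return en;" would yield DISABLE_IN_SUSPEND (1) for true and
	 * DO_NOTHING_IN_SUSPEND (0) for false; the ternary keeps intent.
	 */
	return en ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
}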
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
            !rdev->desc->fixed_uV)
                rdev->is_switch = true;
 
+       dev_set_drvdata(&rdev->dev, rdev);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
                goto unset_supplies;
        }
 
-       dev_set_drvdata(&rdev->dev, rdev);
        rdev_init_debugfs(rdev);
 
        /* try to resolve regulators supply since a new one was registered */
index 638f17d4c8485e11fa7d0a9486fc3ec131a341cd..210fc20f7de7a9cd26dbbee68e24a7c0bc2dc94a 100644 (file)
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
                else if (of_property_read_bool(suspend_np,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
-               else
-                       suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
 
                if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
                                          &pval))
index fac377320158d2a075978be6a8c96adea51e9305..f42a619198c46caa816c2e2ae6e6fef1cc4d959b 100644 (file)
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
                vscsi->dds.window[LOCAL].liobn,
                vscsi->dds.window[REMOTE].liobn);
 
-       strcpy(vscsi->eye, "VSCSI ");
-       strncat(vscsi->eye, vdev->name, MAX_EYE);
+       snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
 
        vscsi->dds.unit_id = vdev->unit_address;
-       strncpy(vscsi->dds.partition_name, partition_name,
+       strscpy(vscsi->dds.partition_name, partition_name,
                sizeof(vscsi->dds.partition_name));
        vscsi->dds.partition_num = partition_number;
 
index f2ec80b0ffc080d614909dab1040b34342769930..271990bc065b924dda965bc8ee777aface7a5b47 100644 (file)
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
        LEAVE;
 }
 
+static void ipr_add_remove_thread(struct work_struct *work)
+{
+       unsigned long lock_flags;
+       struct ipr_resource_entry *res;
+       struct scsi_device *sdev;
+       struct ipr_ioa_cfg *ioa_cfg =
+               container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
+       u8 bus, target, lun;
+       int did_work;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+restart:
+       do {
+               did_work = 0;
+               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       return;
+               }
+
+               list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+                       if (res->del_from_ml && res->sdev) {
+                               did_work = 1;
+                               sdev = res->sdev;
+                               if (!scsi_device_get(sdev)) {
+                                       if (!res->add_to_ml)
+                                               list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+                                       else
+                                               res->del_from_ml = 0;
+                                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                                       scsi_remove_device(sdev);
+                                       scsi_device_put(sdev);
+                                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                               }
+                               break;
+                       }
+               }
+       } while (did_work);
+
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (res->add_to_ml) {
+                       bus = res->bus;
+                       target = res->target;
+                       lun = res->lun;
+                       res->add_to_ml = 0;
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       scsi_add_device(ioa_cfg->host, bus, target, lun);
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                       goto restart;
+               }
+       }
+
+       ioa_cfg->scan_done = 1;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+       LEAVE;
+}
+
 /**
  * ipr_worker_thread - Worker thread
  * @work:              ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
 static void ipr_worker_thread(struct work_struct *work)
 {
        unsigned long lock_flags;
-       struct ipr_resource_entry *res;
-       struct scsi_device *sdev;
        struct ipr_dump *dump;
        struct ipr_ioa_cfg *ioa_cfg =
                container_of(work, struct ipr_ioa_cfg, work_q);
-       u8 bus, target, lun;
-       int did_work;
 
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
                return;
        }
 
-restart:
-       do {
-               did_work = 0;
-               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
-                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                       return;
-               }
+       schedule_work(&ioa_cfg->scsi_add_work_q);
 
-               list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-                       if (res->del_from_ml && res->sdev) {
-                               did_work = 1;
-                               sdev = res->sdev;
-                               if (!scsi_device_get(sdev)) {
-                                       if (!res->add_to_ml)
-                                               list_move_tail(&res->queue, &ioa_cfg->free_res_q);
-                                       else
-                                               res->del_from_ml = 0;
-                                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                                       scsi_remove_device(sdev);
-                                       scsi_device_put(sdev);
-                                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-                               }
-                               break;
-                       }
-               }
-       } while (did_work);
-
-       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-               if (res->add_to_ml) {
-                       bus = res->bus;
-                       target = res->target;
-                       lun = res->lun;
-                       res->add_to_ml = 0;
-                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                       scsi_add_device(ioa_cfg->host, bus, target, lun);
-                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-                       goto restart;
-               }
-       }
-
-       ioa_cfg->scan_done = 1;
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-       kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
        LEAVE;
 }
 
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+       INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
index 68afbbde54d352f4c88cb4a3b5695ad3bd713ba0..f6baa23513139a53676b3f5005474cd419818098 100644 (file)
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
        u8 saved_mode_page_len;
 
        struct work_struct work_q;
+       struct work_struct scsi_add_work_q;
        struct workqueue_struct *reset_work_q;
 
        wait_queue_head_t reset_wait_q;
index 057a60abe664d269af64a2e85a4cea2faf740cfc..1a6ed9b0a249397880b4fac46c36d3acd23fc613 100644 (file)
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                goto buffer_done;
 
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               nrport = NULL;
+               spin_lock(&vport->phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
-               if (!rport)
-                       continue;
-
-               /* local short-hand pointer. */
-               nrport = rport->remoteport;
+               if (rport)
+                       nrport = rport->remoteport;
+               spin_unlock(&vport->phba->hbalock);
                if (!nrport)
                        continue;
 
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
        struct lpfc_nodelist  *ndlp;
 #if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_nvme_rport *rport;
+       struct nvme_fc_remote_port *remoteport = NULL;
 #endif
 
        shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
                if (ndlp->rport)
                        ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 #if (IS_ENABLED(CONFIG_NVME_FC))
+               spin_lock(&vport->phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
                if (rport)
+                       remoteport = rport->remoteport;
+               spin_unlock(&vport->phba->hbalock);
+               if (remoteport)
                        nvme_fc_set_remoteport_devloss(rport->remoteport,
                                                       vport->cfg_devloss_tmo);
 #endif
index 9df0c051349f68220cf19a4d6cacd6f236186f34..aec5b10a8c855ff9d37433afed149beddc670104 100644 (file)
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        unsigned char *statep;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvmet_tgtport *tgtp;
-       struct nvme_fc_remote_port *nrport;
+       struct nvme_fc_remote_port *nrport = NULL;
        struct lpfc_nvme_rport *rport;
 
        cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        len += snprintf(buf + len, size - len, "\tRport List:\n");
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                /* local short-hand pointer. */
+               spin_lock(&phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
-               if (!rport)
-                       continue;
-
-               nrport = rport->remoteport;
+               if (rport)
+                       nrport = rport->remoteport;
+               spin_unlock(&phba->hbalock);
                if (!nrport)
                        continue;
 
index 028462e5994d2ea9329d3cacd3845ff77f6cb013..918ae18ef8a8273f9385e2eb872947bac8a7275c 100644 (file)
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
 
+       spin_lock_irq(&vport->phba->hbalock);
        oldrport = lpfc_ndlp_get_nrport(ndlp);
+       spin_unlock_irq(&vport->phba->hbalock);
        if (!oldrport)
                lpfc_nlp_get(ndlp);
 
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
-       struct nvme_fc_remote_port *remoteport;
+       struct nvme_fc_remote_port *remoteport = NULL;
 
        localport = vport->localport;
 
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (!lport)
                goto input_err;
 
+       spin_lock_irq(&vport->phba->hbalock);
        rport = lpfc_ndlp_get_nrport(ndlp);
-       if (!rport)
+       if (rport)
+               remoteport = rport->remoteport;
+       spin_unlock_irq(&vport->phba->hbalock);
+       if (!remoteport)
                goto input_err;
 
-       remoteport = rport->remoteport;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6033 Unreg nvme remoteport %p, portname x%llx, "
                         "port_id x%06x, portstate x%x port type x%x\n",
index b79b366a94f7993d2dc83c2b0c757c70ed753e75..4a57ffecc7e616fd2bcc4a7897996994deec1543 100644 (file)
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
        case REQ_OP_ZONE_RESET:
                return sd_zbc_setup_reset_cmnd(cmd);
        default:
-               BUG();
+               WARN_ON_ONCE(1);
+               return BLKPREP_KILL;
        }
 }
 
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
        if (rot == 1) {
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
                blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+       } else {
+               blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
        if (sdkp->device->type == TYPE_ZBC) {
index 9d5d2ca7fc4ff6648127abdbd32947dec5182dee..c55f38ec391ca7d07e5ae1714168596f64acb125 100644 (file)
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
+
+       /*
+        * Do not use blk-mq at this time because blk-mq does not support
+        * runtime pm.
+        */
+       host->use_blk_mq = false;
+
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
index 4b5e250e86159f5585205498b8029d85abf39879..e5c7e1ef63188914bfc655b1d187da5fa44166a1 100644 (file)
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
        struct sdw_master_runtime *m_rt = stream->m_rt;
        struct sdw_slave_runtime *s_rt, *_s_rt;
 
-       list_for_each_entry_safe(s_rt, _s_rt,
-                       &m_rt->slave_rt_list, m_rt_node)
-               sdw_stream_remove_slave(s_rt->slave, stream);
+       list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
+               sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
+               sdw_release_slave_stream(s_rt->slave, stream);
+       }
 
        list_del(&m_rt->bus_node);
 }
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
                                "Master runtime config failed for stream:%s",
                                stream->name);
                ret = -ENOMEM;
-               goto error;
+               goto unlock;
        }
 
        ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
        if (ret)
                goto stream_error;
 
-       stream->state = SDW_STREAM_CONFIGURED;
+       goto unlock;
 
 stream_error:
        sdw_release_master_stream(stream);
-error:
+unlock:
        mutex_unlock(&bus->bus_lock);
        return ret;
 }
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
  * @stream: SoundWire stream
  * @port_config: Port configuration for audio stream
  * @num_ports: Number of ports
+ *
+ * A Slave is expected to be added to the Stream before the
+ * Master is added.
+ *
  */
 int sdw_stream_add_slave(struct sdw_slave *slave,
                struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
        if (ret)
                goto stream_error;
 
+       /*
+        * Change stream state to CONFIGURED on the first Slave add.
+        * The bus is not aware of the number of Slave(s) in a stream at
+        * this point, so it cannot wait for all Slave(s) to be added
+        * before changing the stream state to CONFIGURED.
+        */
        stream->state = SDW_STREAM_CONFIGURED;
        goto error;
 
index 0626e6e3ea0c05dee66e177aa4d2a0c75b703247..421bfc7dda67413bd72ae96055c3767d01a33fa7 100644 (file)
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
                *mflags |= SPI_MASTER_NO_RX;
 
        spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
-       if (IS_ERR(spi_gpio->mosi))
-               return PTR_ERR(spi_gpio->mosi);
+       if (IS_ERR(spi_gpio->sck))
+               return PTR_ERR(spi_gpio->sck);
 
        for (i = 0; i < num_chipselects; i++) {
                spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
index 95dc4d78618df29c2b15d92fa6d11e3b84bb7ed1..b37de1d991d6abe1e0a25c5fffcf04d9851aba3e 100644 (file)
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
-       if (ret > 0 && rspi->dma_callbacked)
+       if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
-       else if (!ret) {
-               dev_err(&rspi->master->dev, "DMA timeout\n");
-               ret = -ETIMEDOUT;
+       } else {
+               if (!ret) {
+                       dev_err(&rspi->master->dev, "DMA timeout\n");
+                       ret = -ETIMEDOUT;
+               }
                if (tx)
                        dmaengine_terminate_all(rspi->master->dma_tx);
                if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS     &rspi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
        .probe =        rspi_probe,
        .remove =       rspi_remove,
        .id_table =     spi_driver_ids,
        .driver         = {
                .name = "renesas_spi",
+               .pm = DEV_PM_OPS,
                .of_match_table = of_match_ptr(rspi_of_match),
        },
 };
index 539d6d1a277a6179f7053698cbee772083354828..101cd6aae2ea520afcac89671071cdabe2341f8f 100644 (file)
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+       sh_msiof_write(p, STR,
+                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+                        sh_msiof_spi_resume);
+#define DEV_PM_OPS     &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
+               .pm             = DEV_PM_OPS,
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
 };
index 6f7b946b5cedf103cbed9886d7ec3079bbdcf6de..1427f343b39a3dc4468e14c3612814508993b3c0 100644 (file)
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
                goto exit_free_master;
        }
 
+       /* disabled clock may cause interrupt storm upon request */
+       tspi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(tspi->clk)) {
+               ret = PTR_ERR(tspi->clk);
+               dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_prepare(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_enable(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+               goto exit_free_master;
+       }
+
        spi_irq = platform_get_irq(pdev, 0);
        tspi->irq = spi_irq;
        ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
                                        tspi->irq);
-               goto exit_free_master;
-       }
-
-       tspi->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto exit_free_irq;
+               goto exit_clk_disable;
        }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
        tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
        free_irq(spi_irq, tspi);
+exit_clk_disable:
+       clk_disable(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
 
        free_irq(tspi->irq, tspi);
 
+       clk_disable(tspi->clk);
+
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
 
index f48e06a03cdbf621a8951f50dc38ea95ef5fce77..9a58aaf72edd6978dea5930f33789386bd3266aa 100644 (file)
@@ -1,9 +1,3 @@
-config SOC_CAMERA_IMX074
-       tristate "imx074 support (DEPRECATED)"
-       depends on SOC_CAMERA && I2C
-       help
-         This driver supports IMX074 cameras from Sony
-
 config SOC_CAMERA_MT9T031
        tristate "mt9t031 support (DEPRECATED)"
        depends on SOC_CAMERA && I2C
index 9518ffd8b8bac81cc1b725046857d5f229e58b15..4e680d753941f71ea299d87b6c0cd18fc307b50f 100644 (file)
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-       int j = DIV_ROUND_UP(len, 2), rc;
-
-       rc = hex2bin(dst, src, j);
-       if (rc < 0)
-               pr_debug("CHAP string contains non hex digit symbols\n");
-
-       dst[j] = '\0';
-       return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-       int i;
-
-       for (i = 0; i < src_len; i++) {
-               sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-       }
-}
-
 static int chap_gen_challenge(
        struct iscsi_conn *conn,
        int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
        ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
        if (unlikely(ret))
                return ret;
-       chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+       bin2hex(challenge_asciihex, chap->challenge,
                                CHAP_CHALLENGE_LENGTH);
        /*
         * Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_R.\n");
                goto out;
        }
+       if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
+       if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
 
        pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-       chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
        tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
        pr_debug("[server] MD5 Server Digest: %s\n", response);
 
        if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_C.\n");
                goto out;
        }
-       pr_debug("[server] Got CHAP_C=%s\n", challenge);
-       challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-                               strlen(challenge));
+       challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
        if (!challenge_len) {
                pr_err("Unable to convert incoming challenge\n");
                goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
                pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
                goto out;
        }
+       if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+               pr_err("Malformed CHAP_C\n");
+               goto out;
+       }
+       pr_debug("[server] Got CHAP_C=%s\n", challenge);
        /*
         * During mutual authentication, the CHAP_C generated by the
         * initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
        /*
         * Convert the response from binary to ASCII hex.
         */
-       chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, digest, MD5_SIGNATURE_SIZE);
        *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
                        response);
        *nr_out_len += 1;
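The patch above drops the driver's private conversion helpers in favor of the stock lib/hexdump.c routines. A minimal round-trip sketch of those two calls (buffer contents and names are illustrative); note that hex2bin() returns a negative value on non-hex input and bin2hex() does not NUL-terminate its output:

#include <linux/errno.h>
#include <linux/kernel.h>

static int example_hex_roundtrip(void)
{
	u8 bin[4];
	char hex[2 * sizeof(bin) + 1];

	/* "deadbeef" -> { 0xde, 0xad, 0xbe, 0xef } */
	if (hex2bin(bin, "deadbeef", sizeof(bin)) < 0)
		return -EINVAL;

	/* back to "deadbeef"; terminate by hand */
	bin2hex(hex, bin, sizeof(bin));
	hex[2 * sizeof(bin)] = '\0';
	return 0;
}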
index 24a5f05e769bee9f7972319c13fb84745773583c..e5389591bb4f1f83a207ee5be7e3577648972599 100644 (file)
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
        /* Get the address of the host memory buffer.
         */
        bdp = pinfo->rx_cur;
-       while (bdp->cbd_sc & BD_SC_EMPTY)
-               ;
+       if (bdp->cbd_sc & BD_SC_EMPTY)
+               return NO_POLL_CHAR;
 
        /* If the buffer address is in the CPM DPRAM, don't
         * convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
                poll_chars = 0;
        }
        if (poll_chars <= 0) {
-               poll_chars = poll_wait_key(poll_buf, pinfo);
+               int ret = poll_wait_key(poll_buf, pinfo);
+
+               if (ret == NO_POLL_CHAR)
+                       return ret;
+               poll_chars = ret;
                pollp = poll_buf;
        }
        poll_chars--;
index 51e47a63d61accef957c87909eedbef40c0ef42e..3f8d1274fc85c2ee0306d93db03042e55c7e193d 100644 (file)
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
        struct circ_buf *ring = &sport->rx_ring;
        int ret, nent;
        int bits, baud;
-       struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+       struct tty_port *port = &sport->port.state->port;
+       struct tty_struct *tty = port->tty;
        struct ktermios *termios = &tty->termios;
 
        baud = tty_get_baud_rate(tty);
index 239c0fa2e9816805cd134fad697495f7f7929d4b..0f67197a3783ffdd62eccd3b41a813a80d66b6df 100644 (file)
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
                                ret);
                        return ret;
                }
+
+               ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+                                      dev_name(&pdev->dev), sport);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+                               ret);
+                       return ret;
+               }
        } else {
                ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
                                       dev_name(&pdev->dev), sport);
index d04b5eeea3c6989e9313c00d1085bbd801ffe9cd..170e446a2f625c4af4bf29c05f87931182979a73 100644 (file)
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
                termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
                termios->c_cflag &= CREAD | CBAUD;
                termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+               termios->c_cflag |= CS8;
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
index 32bc3e3fe4d30bcea455fb7dc13657b63b111cd9..5e5da9acaf0a9f2522b0b1659968499d91a97e4a 100644 (file)
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
 static int tty_reopen(struct tty_struct *tty)
 {
        struct tty_driver *driver = tty->driver;
+       int retval;
 
        if (driver->type == TTY_DRIVER_TYPE_PTY &&
            driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
 
        tty->count++;
 
-       if (!tty->ldisc)
-               return tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (tty->ldisc)
+               return 0;
 
-       return 0;
+       retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (retval)
+               tty->count--;
+
+       return retval;
 }
 
 /**
index a78ad10a119b5b17e53e7391d2f73057193f4c1e..73cdc0d633dd62c12e6ed1ca6545846b8dabaa09 100644 (file)
@@ -32,6 +32,8 @@
 #include <asm/io.h>
 #include <linux/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
                if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                        ret = -ENXIO;
                else {
+                       vsa.console = array_index_nospec(vsa.console,
+                                                        MAX_NR_CONSOLES + 1);
                        vsa.console--;
                        console_lock();
                        ret = vc_allocate(vsa.console);
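array_index_nospec() is the generic Spectre-v1 helper from <linux/nospec.h>: after an ordinary bounds check, it clamps the index so that even a mispredicted branch cannot index past the bound. In the hunk above the clamp is against MAX_NR_CONSOLES + 1 because the check admits values 1..MAX_NR_CONSOLES before the decrement. A generic sketch of the pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/nospec.h>

static int example_lookup(unsigned int idx, const int *table,
			  unsigned int len)
{
	if (idx >= len)
		return -EINVAL;
	idx = array_index_nospec(idx, len);	/* clamp under speculation */
	return table[idx];
}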
index 656d247819c9d92c257581add9edbc7f166f899a..bec581fb7c6361891a81a3a0aa88d3863b686f58 100644 (file)
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
 
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
-       rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+       rv = usb_submit_urb(desc->response, GFP_KERNEL);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
                dev_err(&desc->intf->dev,
index 15cc76e22123e7044c77dc5b436ec13916dc83f1..99116af07f1d9ca1e7c275a70122f370ec0d32a3 100644 (file)
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
  */
 struct usb_role_switch *usb_role_switch_get(struct device *dev)
 {
-       return device_connection_find_match(dev, "usb-role-switch", NULL,
-                                           usb_role_switch_match);
+       struct usb_role_switch *sw;
+
+       sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+                                         usb_role_switch_match);
+
+       if (!IS_ERR_OR_NULL(sw))
+               WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+       return sw;
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_get);
 
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
  */
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
                put_device(&sw->dev);
+               module_put(sw->dev.parent->driver->owner);
+       }
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_put);
 
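The try_module_get()/module_put() pair added above pins the provider's module for as long as a consumer holds the switch, so the module cannot be unloaded while the reference is live. A generic sketch of the pattern with hypothetical names, not the driver's actual API:

#include <linux/device.h>
#include <linux/module.h>

/* Sketch: pin the driver module of a provider device on get, drop on put. */
static bool example_pin_provider(struct device *provider)
{
	return try_module_get(provider->driver->owner);
}

static void example_unpin_provider(struct device *provider)
{
	module_put(provider->driver->owner);
}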
index 6ce77b33da61302281dd1c49284e457ff04b73bd..244417d0dfd1fdc74c2f0c8ee58e35f14dd9e7db 100644 (file)
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        struct async *as = NULL;
        struct usb_ctrlrequest *dr = NULL;
        unsigned int u, totlen, isofrmlen;
-       int i, ret, is_in, num_sgs = 0, ifnum = -1;
+       int i, ret, num_sgs = 0, ifnum = -1;
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
+       bool is_in;
+       bool allow_short = false;
+       bool allow_zero = false;
        unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = 0;
        switch (uurb->type) {
        case USBDEVFS_URB_TYPE_CONTROL:
+               if (is_in)
+                       allow_short = true;
                if (!usb_endpoint_xfer_control(&ep->desc))
                        return -EINVAL;
                /* min 8 byte setup packet */
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                break;
 
        case USBDEVFS_URB_TYPE_BULK:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                switch (usb_endpoint_type(&ep->desc)) {
                case USB_ENDPOINT_XFER_CONTROL:
                case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                if (!usb_endpoint_xfer_int(&ep->desc))
                        return -EINVAL;
  interrupt_urb:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                break;
 
        case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
        if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
                u |= URB_ISO_ASAP;
-       if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+       if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
                u |= URB_SHORT_NOT_OK;
-       if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+       if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
                u |= URB_ZERO_PACKET;
        if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
                u |= URB_NO_INTERRUPT;
        as->urb->transfer_flags = u;
 
+       if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+       if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
        as->urb->transfer_buffer_length = uurb->buffer_length;
        as->urb->setup_packet = (unsigned char *)dr;
        dr = NULL;
index e76e95f62f7628a907d0132c31261b2ffc641e49..a1f225f077cd23b3ebbf07951b0734183c209bf5 100644 (file)
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        struct device *dev;
        struct usb_device *udev;
        int retval = 0;
-       int lpm_disable_error = -ENODEV;
 
        if (!iface)
                return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 
        iface->condition = USB_INTERFACE_BOUND;
 
-       /* See the comment about disabling LPM in usb_probe_interface(). */
-       if (driver->disable_hub_initiated_lpm) {
-               lpm_disable_error = usb_unlocked_disable_lpm(udev);
-               if (lpm_disable_error) {
-                       dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
-                               __func__, driver->name);
-                       return -ENOMEM;
-               }
-       }
-
        /* Claimed interfaces are initially inactive (suspended) and
         * runtime-PM-enabled, but only if the driver has autosuspend
         * support.  Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        if (device_is_registered(dev))
                retval = device_bind_driver(dev);
 
-       /* Attempt to re-enable USB3 LPM, if the disable was successful. */
-       if (!lpm_disable_error)
-               usb_unlocked_enable_lpm(udev);
+       if (retval) {
+               dev->driver = NULL;
+               usb_set_intfdata(iface, NULL);
+               iface->needs_remote_wakeup = 0;
+               iface->condition = USB_INTERFACE_UNBOUND;
+
+               /*
+                * Unbound interfaces are always runtime-PM-disabled
+                * and runtime-PM-suspended
+                */
+               if (driver->supports_autosuspend)
+                       pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+       }
 
        return retval;
 }
index e77dfe5ed5ec7ec84462ba264865aa6ae7234ebc..178d6c6063c0280a06c6dc3dc6611394d8233479 100644 (file)
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
        quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
                             GFP_KERNEL);
        if (!quirk_list) {
+               quirk_count = 0;
                mutex_unlock(&quirk_mutex);
                return -ENOMEM;
        }
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
        .string = quirks_param,
 };
 
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
 MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
 
 /* Lists of quirky USB devices, split in device quirks and interface quirks.
index 623be3174fb32e1036c697430f34adafd4f22d0f..79d8bd7a612e65b5c765f16e0c5567a440cdc1da 100644 (file)
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
        struct usb_interface_cache *intf_cache = NULL;
        int i;
 
+       if (!config)
+               return NULL;
        for (i = 0; i < config->desc.bNumInterfaces; i++) {
                if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
                                == iface_num) {
index df827ff57b0d0c08b0dd1d9763ab0ebcb7fa3199..23a0df79ef2129323d3b8f0548f06c25d1421916 100644 (file)
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
        return controller;
 }
 
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
-       struct musb *musb = c->musb;
-       struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
-       void __iomem *usbss_base = glue->usbss_base;
-
-       musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
-       cppi41_dma_controller_destroy(c);
-}
-
 #ifdef CONFIG_PM_SLEEP
 static void dsps_dma_controller_suspend(struct dsps_glue *glue)
 {
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
 
 #ifdef CONFIG_USB_TI_CPPI41_DMA
        .dma_init       = dsps_dma_controller_create,
-       .dma_exit       = dsps_dma_controller_destroy,
+       .dma_exit       = cppi41_dma_controller_destroy,
 #endif
        .enable         = dsps_musb_enable,
        .disable        = dsps_musb_disable,
index ddaac63ecf1225385d53a53f13521bb7b0ae9e00..d990aa510fabf008a332706c34fe3f61b2929e82 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/device.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/usb/typec_mux.h>
 
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
        mutex_lock(&switch_lock);
        sw = device_connection_find_match(dev, "typec-switch", NULL,
                                          typec_switch_match);
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
+               WARN_ON(!try_module_get(sw->dev->driver->owner));
                get_device(sw->dev);
+       }
        mutex_unlock(&switch_lock);
 
        return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
  */
 void typec_switch_put(struct typec_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
+               module_put(sw->dev->driver->owner);
                put_device(sw->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(typec_switch_put);
 
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
 
        mutex_lock(&mux_lock);
        mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
-       if (!IS_ERR_OR_NULL(mux))
+       if (!IS_ERR_OR_NULL(mux)) {
+               WARN_ON(!try_module_get(mux->dev->driver->owner));
                get_device(mux->dev);
+       }
        mutex_unlock(&mux_lock);
 
        return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
  */
 void typec_mux_put(struct typec_mux *mux)
 {
-       if (!IS_ERR_OR_NULL(mux))
+       if (!IS_ERR_OR_NULL(mux)) {
+               module_put(mux->dev->driver->owner);
                put_device(mux->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(typec_mux_put);
 
index f32d7125ad0f237d61173cd72383683ac380c4e4..4becbf168b7f0df3229b1e1a5d0fb8daca02df0d 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
                        xa_unlock_irq(&mapping->i_pages);
                        break;
                } else if (IS_ERR(entry)) {
+                       xa_unlock_irq(&mapping->i_pages);
                        WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
                        continue;
                }
@@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 {
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
-       vm_fault_t ret = VM_FAULT_NOPAGE;
-       struct page *zero_page;
-       pfn_t pfn;
-
-       zero_page = ZERO_PAGE(0);
-       if (unlikely(!zero_page)) {
-               ret = VM_FAULT_OOM;
-               goto out;
-       }
+       pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+       vm_fault_t ret;
 
-       pfn = page_to_pfn_t(zero_page);
        dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
                        false);
        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
index 7f7ee18fe179c258ca9ccb41148592d816ec96f6..e4bb9386c04551e1af155154213285c6da688531 100644 (file)
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext2_set_inode_flags(inode);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse (bh);
-       ext2_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;
        
index a82c292af6c58eb4694b790d000cade8416a32ba..8c738c0e6e9f744df88864b7783e7e15f54f07c5 100644 (file)
@@ -90,7 +90,6 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
-       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
index 8c19470785e2858e4796a4df8a234ebee900d4b8..3fc4854dce491266cec574c2b0b09f51972b64b7 100644 (file)
@@ -312,6 +312,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
        return bo;
 }
 
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+       if (!kref_get_unless_zero(&bo->kref))
+               return NULL;
+       return bo;
+}
+
 /**
  * ttm_bo_wait - wait for buffer idle.
  *
index a528747f8aedb048b873353d40426d76058cffd1..e8338e5dc10bfd5e6bb415337ff3667c39e008f1 100644 (file)
@@ -78,9 +78,9 @@ enum {
        BD71837_REG_TRANS_COND0        = 0x1F,
        BD71837_REG_TRANS_COND1        = 0x20,
        BD71837_REG_VRFAULTEN          = 0x21,
-       BD71837_REG_MVRFLTMASK0        = 0x22,
-       BD71837_REG_MVRFLTMASK1        = 0x23,
-       BD71837_REG_MVRFLTMASK2        = 0x24,
+       BD718XX_REG_MVRFLTMASK0        = 0x22,
+       BD718XX_REG_MVRFLTMASK1        = 0x23,
+       BD718XX_REG_MVRFLTMASK2        = 0x24,
        BD71837_REG_RCVCFG             = 0x25,
        BD71837_REG_RCVNUM             = 0x26,
        BD71837_REG_PWRONCONFIG0       = 0x27,
@@ -159,6 +159,33 @@ enum {
 #define BUCK8_MASK             0x3F
 #define BUCK8_DEFAULT          0x1E
 
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80           0x1
+#define BD718XX_BUCK1_VRMON130          0x2
+#define BD718XX_BUCK2_VRMON80           0x4
+#define BD718XX_BUCK2_VRMON130          0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80  0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80  0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80  0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80  0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80            0x1
+#define BD718XX_LDO2_VRMON80            0x2
+#define BD718XX_LDO3_VRMON80            0x4
+#define BD718XX_LDO4_VRMON80            0x8
+#define BD718XX_LDO5_VRMON80            0x10
+#define BD718XX_LDO6_VRMON80            0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80           0x10
+#define BD71837_BUCK3_VRMON130          0x20
+#define BD71837_BUCK4_VRMON80           0x40
+#define BD71837_BUCK4_VRMON130          0x80
+#define BD71837_LDO7_VRMON80            0x40
+
 /* BD71837_REG_IRQ bits */
 #define IRQ_SWRST              0x40
 #define IRQ_PWRON_S            0x20
index 67662d01130a7b4f69c8b4ec176f59b11e8ae33d..3ef82d3a78db51d958858470f8158f72cfd74fb3 100644 (file)
@@ -49,8 +49,9 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
 #else
 static inline void netpoll_poll_disable(struct net_device *dev) { return; }
 static inline void netpoll_poll_enable(struct net_device *dev) { return; }
index 3468703d663af6d94bbf6fd07343d8bd9418b81a..a459a5e973a7294f171e192dd4dacf98d5b27c16 100644 (file)
@@ -48,9 +48,9 @@ struct regulator;
  * DISABLE_IN_SUSPEND  - turn off regulator in suspend states
  * ENABLE_IN_SUSPEND   - keep regulator on in suspend states
  */
-#define DO_NOTHING_IN_SUSPEND  (-1)
-#define DISABLE_IN_SUSPEND     0
-#define ENABLE_IN_SUSPEND      1
+#define DO_NOTHING_IN_SUSPEND  0
+#define DISABLE_IN_SUSPEND     1
+#define ENABLE_IN_SUSPEND      2
 
 /* Regulator active discharge flags */
 enum regulator_active_discharge {
index b2bd4b4127c46a2e8f2eb103d989b629d7169570..69ee30456864a05ceb76bac1d0dbe8df3a0e3448 100644 (file)
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
  * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
  * @data.buswidth: number of IO lanes used to send/receive the data
  * @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ *              operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
  */
 struct spi_mem_op {
        struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
                u8 buswidth;
                enum spi_mem_data_dir dir;
                unsigned int nbytes;
-               /* buf.{in,out} must be DMA-able. */
                union {
                        void *in;
                        const void *out;
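
The reworked spi_mem_op documentation makes two contracts explicit: @data.nbytes may be zero (no data phase at all) and both data buffers must be DMA-able. As a rough illustration only, a single-lane read could be described like this; the opcode, address and length are made-up values, and rxbuf stands for a DMA-safe allocation (e.g. from kmalloc()):

    struct spi_mem_op op = {
        .cmd  = { .opcode = 0x03, .buswidth = 1 },  /* hypothetical read opcode */
        .addr = { .nbytes = 3, .val = 0x1000, .buswidth = 1 },
        /* .dummy left zeroed: no dummy cycles */
        .data = { .dir = SPI_MEM_DATA_IN, .nbytes = 256,
                  .buswidth = 1, .buf.in = rxbuf }, /* rxbuf must be DMA-able */
    };
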
index c43e9a01b892692b3901cfb1fd1459cdb428c553..7ddfc65586b046e80e3ae7357368b696b406507f 100644 (file)
@@ -30,6 +30,7 @@
 
 #define MTL_MAX_RX_QUEUES      8
 #define MTL_MAX_TX_QUEUES      8
+#define STMMAC_CH_MAX          8
 
 #define STMMAC_RX_COE_NONE     0
 #define STMMAC_RX_COE_TYPE1    1
index 409c845d4cd3dce5762c27a2e7e7c1c5c288f134..422b1c01ee0de0d679d7f6cb4276bb7d45e82186 100644 (file)
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
 {
-       if (unlikely(!check_copy_size(addr, bytes, false)))
+       if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter_mcsafe(addr, bytes, i);
index 316694dafa5bae75363e34c035906e2d937c33c6..008f466d1da7ea60c7b46dc4b5133e8ed257c9c3 100644 (file)
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES              127
+#define NFC_HCI_MAX_PIPES              128
 struct nfc_hci_init_data {
        u8 gate_count;
        struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
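
The 127 -> 128 bump matches the arithmetic in the comment just above: a 7-bit pipe identifier has 2^7 = 128 possible values (0..127), so an array indexed directly by pipe id needs 128 entries, and the bounds checks added later in this series reject pipe >= NFC_HCI_MAX_PIPES. With the old value of 127, the highest legal pipe id (127) itself indexed past the array. A toy sketch of the invariant (names illustrative):

    #include <stdint.h>

    #define MAX_PIPES 128              /* 7-bit id => 2^7 distinct values */

    static int pipe_gate[MAX_PIPES];   /* indexed directly by pipe id */

    static int lookup_gate(uint8_t pipe)
    {
        if (pipe >= MAX_PIPES)         /* ids 0..127 valid; 128+ overflows */
            return -1;
        return pipe_gate[pipe];
    }
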
index 139632b871816f9e3dad943b7c43dd8d89d0e3fe..0cd40ebfa1b1669e2c29aa757b294cc479e51991 100644 (file)
@@ -338,6 +338,15 @@ extern "C" {
  */
 #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE      fourcc_mod_code(SAMSUNG, 1)
 
+/*
+ * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
+ *
+ * This is a simple tiled layout using tiles of 16x16 pixels in a row-major
+ * layout. For YCbCr formats Cb/Cr components are taken in such a way that
+ * they correspond to their 16x16 luma block.
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE      fourcc_mod_code(SAMSUNG, 2)
+
 /*
  * Qualcomm Compressed Format
  *
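
The new Samsung 16x16 modifier's comment fully determines the tile placement (row-major macroblocks) but not the byte order inside a tile. Assuming, purely for illustration, linear pixels within each tile and a width that is a multiple of 16, the offset of pixel (x, y) would work out as:

    #include <stddef.h>

    /* Illustrative only: the hardware's intra-tile ordering may differ. */
    static size_t tiled_offset(unsigned x, unsigned y,
                               unsigned width, unsigned bpp)
    {
        unsigned tiles_per_row = width / 16;
        unsigned tile  = (y / 16) * tiles_per_row + (x / 16); /* which tile  */
        unsigned inner = (y % 16) * 16 + (x % 16);            /* spot inside */

        return ((size_t)tile * 256 + inner) * bpp;            /* 256 px/tile */
    }
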
index 910cc4334b21557f98298082b16e2359a6414e01..7b8c9e19bad1c2bf72c21dcf6b358559d5e4b4ea 100644 (file)
@@ -65,7 +65,7 @@
 
 /* keyctl structures */
 struct keyctl_dh_params {
-       __s32 dh_private;
+       __s32 private;
        __s32 prime;
        __s32 base;
 };
index 488ef9663c01f3b4d2cc44b9ca88863ced2860e7..0a0f2ec75370d83d8e0afdc24f04e8e65bf870b9 100644 (file)
@@ -132,6 +132,7 @@ struct smap_psock {
        struct work_struct gc_work;
 
        struct proto *sk_proto;
+       void (*save_unhash)(struct sock *sk);
        void (*save_close)(struct sock *sk, long timeout);
        void (*save_data_ready)(struct sock *sk);
        void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_unhash(struct sock *sk);
 static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
                         struct proto *base)
 {
        prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].unhash               = bpf_tcp_unhash;
        prot[SOCKMAP_BASE].close                = bpf_tcp_close;
        prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
        prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
                return -EBUSY;
        }
 
+       psock->save_unhash = sk->sk_prot->unhash;
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
@@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
        return e;
 }
 
-static void bpf_tcp_close(struct sock *sk, long timeout)
+static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
 {
-       void (*close_fun)(struct sock *sk, long timeout);
        struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
-       struct smap_psock *psock;
        struct sock *osk;
 
-       lock_sock(sk);
-       rcu_read_lock();
-       psock = smap_psock_sk(sk);
-       if (unlikely(!psock)) {
-               rcu_read_unlock();
-               release_sock(sk);
-               return sk->sk_prot->close(sk, timeout);
-       }
-
-       /* The psock may be destroyed anytime after exiting the RCU critial
-        * section so by the time we use close_fun the psock may no longer
-        * be valid. However, bpf_tcp_close is called with the sock lock
-        * held so the close hook and sk are still valid.
-        */
-       close_fun = psock->save_close;
-
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(e);
                e = psock_map_pop(sk, psock);
        }
+}
+
+static void bpf_tcp_unhash(struct sock *sk)
+{
+       void (*unhash_fun)(struct sock *sk);
+       struct smap_psock *psock;
+
+       rcu_read_lock();
+       psock = smap_psock_sk(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               if (sk->sk_prot->unhash)
+                       sk->sk_prot->unhash(sk);
+               return;
+       }
+       unhash_fun = psock->save_unhash;
+       bpf_tcp_remove(sk, psock);
+       rcu_read_unlock();
+       unhash_fun(sk);
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+       void (*close_fun)(struct sock *sk, long timeout);
+       struct smap_psock *psock;
+
+       lock_sock(sk);
+       rcu_read_lock();
+       psock = smap_psock_sk(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               release_sock(sk);
+               return sk->sk_prot->close(sk, timeout);
+       }
+       close_fun = psock->save_close;
+       bpf_tcp_remove(sk, psock);
        rcu_read_unlock();
        release_sock(sk);
        close_fun(sk, timeout);
@@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state.
+        */
        if (skops.sk->sk_type != SOCK_STREAM ||
-           skops.sk->sk_protocol != IPPROTO_TCP) {
+           skops.sk->sk_protocol != IPPROTO_TCP ||
+           skops.sk->sk_state != TCP_ESTABLISHED) {
                fput(socket->file);
                return -EOPNOTSUPP;
        }
@@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state.
+        */
+       if (skops.sk->sk_type != SOCK_STREAM ||
+           skops.sk->sk_protocol != IPPROTO_TCP ||
+           skops.sk->sk_state != TCP_ESTABLISHED) {
+               fput(socket->file);
+               return -EOPNOTSUPP;
+       }
+
        lock_sock(skops.sk);
        preempt_disable();
        rcu_read_lock();
@@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_check_btf = map_check_no_btf,
 };
 
+static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
+{
+       return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+              ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
           struct bpf_map *, map, void *, key, u64, flags)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
+
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state. This checks that the sock op triggering the update is
+        * one indicating we are (or soon will be) in an ESTABLISHED state.
+        */
+       if (!bpf_is_valid_sock_op(bpf_sock))
+               return -EOPNOTSUPP;
        return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
 }
 
@@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
           struct bpf_map *, map, void *, key, u64, flags)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
+
+       if (!bpf_is_valid_sock_op(bpf_sock))
+               return -EOPNOTSUPP;
        return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
 }
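
The sockmap rework above follows the usual proto-override pattern: at attach time the socket's original unhash/close callbacks are saved, the BPF-specific teardown is factored into one helper (bpf_tcp_remove()), and each hook chains to the saved callback afterwards; unlike close, unhash runs without the socket-lock dance. A stripped-down model of that shape (all types and names here are invented for illustration):

    struct sock;

    struct proto {
        void (*unhash)(struct sock *sk);
        void (*close)(struct sock *sk, long timeout);
    };

    struct sock {
        struct proto *sk_prot;
        void *psock;                    /* NULL when not a sockmap socket */
    };

    struct psock {
        /* originals saved by attach-time code (not shown) */
        void (*save_unhash)(struct sock *sk);
        void (*save_close)(struct sock *sk, long timeout);
    };

    static void psock_teardown(struct sock *sk, struct psock *p)
    {
        /* shared cleanup, playing the role of bpf_tcp_remove() */
        (void)sk; (void)p;
    }

    static void bpf_unhash(struct sock *sk)
    {
        struct psock *p = sk->psock;

        if (!p) {                       /* not ours: pass straight through */
            if (sk->sk_prot->unhash)
                sk->sk_prot->unhash(sk);
            return;
        }
        psock_teardown(sk, p);
        p->save_unhash(sk);             /* chain to the saved original */
    }
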
 
index 9bd54304446f8b477a61a799a2d75136672548bd..1b1d63b3634b580cf6b29384e162f76b28e05102 100644 (file)
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
        bool
        select NEED_DMA_MAP_STATE
 
+config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+       bool
+
 config DMA_DIRECT_OPS
        bool
        depends on HAS_DMA
index c80549bf82c6628fea20d230edc8b824e9f36f4d..dcb093e7b37770a2bbdb0e80f984e1cccae7350d 100644 (file)
@@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
                goto out;
        }
 
+       /* If this is a pinned event it must be running on this CPU */
+       if (event->attr.pinned && event->oncpu != smp_processor_id()) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /*
         * If the event is currently on this CPU, its either a per-task event,
         * or local to this CPU. Furthermore it means its ACTIVE (otherwise
index 71c20c1d40028614a19fed940a944b8cbad58776..9f481cfdf77dace2fa594585fde5079d46474c66 100644 (file)
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
                 * the packet to be exactly of that size to make the link
                 * throughput estimation effective.
                 */
-               skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+               skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
        struct batadv_priv *bat_priv;
        struct sk_buff *skb;
        u32 elp_interval;
+       bool ret;
 
        bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
        hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
                 * may sleep and that is not allowed in an rcu protected
                 * context. Therefore schedule a task for that.
                 */
-               queue_work(batadv_event_workqueue,
-                          &hardif_neigh->bat_v.metric_work);
+               ret = queue_work(batadv_event_workqueue,
+                                &hardif_neigh->bat_v.metric_work);
+
+               if (!ret)
+                       batadv_hardif_neigh_put(hardif_neigh);
        }
        rcu_read_unlock();
 
index ff9659af6b9177cdb23523d79a4bc70665b1cc4d..5f1aeeded0e3d8efe3df4f7bbf81ed57f95fc06a 100644 (file)
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
+       bool ret;
 
        ethhdr = eth_hdr(skb);
 
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (unlikely(!backbone_gw))
                return true;
 
-       queue_work(batadv_event_workqueue, &backbone_gw->report_work);
-       /* backbone_gw is unreferenced in the report work function function */
+       ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+       /* backbone_gw is unreferenced in the report work function
+        * if the queue_work() call was successful
+        */
+       if (!ret)
+               batadv_backbone_gw_put(backbone_gw);
 
        return true;
 }
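
Both batman-adv hunks above fix the same leak: queue_work() returns false when the work item is already pending, and in that case the reference taken on behalf of the work item must be dropped by the caller, since the handler will only put the reference once. A self-contained model of the ownership rule (C11 atomics standing in for kref and the workqueue pending bit; names invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct work { atomic_bool pending; };

    struct node {
        atomic_int refcount;
        struct work report_work;
    };

    static void node_put(struct node *n)
    {
        if (atomic_fetch_sub(&n->refcount, 1) == 1)
            free(n);                        /* last reference gone */
    }

    /* models queue_work(): false if the item was already pending */
    static bool queue(struct work *w)
    {
        return !atomic_exchange(&w->pending, true);
    }

    static void schedule_report(struct node *n)
    {
        atomic_fetch_add(&n->refcount, 1);  /* one ref rides with the work */
        if (!queue(&n->report_work))
            node_put(n);                    /* already queued: drop our ref */
    }
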
index 8b198ee798c910b40997ed9ca867fc931c53dcc3..140c61a3f1ecfec4fe23c5ddca19e18e2e86fd56 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -348,6 +349,9 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
+ *
+ * Must be called with the appropriate lock held
+ * (gw.list_lock).
  */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *gw_node;
 
+       lockdep_assert_held(&bat_priv->gw.list_lock);
+
        if (gateway->bandwidth_down == 0)
                return;
 
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
        gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-       spin_lock_bh(&bat_priv->gw.list_lock);
        kref_get(&gw_node->refcount);
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
-       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
 
+       spin_lock_bh(&bat_priv->gw.list_lock);
        gw_node = batadv_gw_node_get(bat_priv, orig_node);
        if (!gw_node) {
                batadv_gw_node_add(bat_priv, orig_node, gateway);
+               spin_unlock_bh(&bat_priv->gw.list_lock);
                goto out;
        }
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
            gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
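
This gateway change, and the matching network-coding, VLAN, translation-table and TVLV hunks that follow, all close one race: lookup and insertion used to be locked separately, so two contexts could both miss in the lookup and insert duplicate entries. The fix holds the list lock across the whole lookup-or-create sequence and documents the contract with lockdep_assert_held() in the helper. Reduced to a sketch (pthread mutex in place of the spinlock, names invented):

    #include <pthread.h>
    #include <stdlib.h>

    struct entry { int key; struct entry *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *list_head;

    /* Lookup-or-create as one critical section: splitting it lets two
     * threads both see "not found" and add the same key twice. */
    static struct entry *get_or_add(int key)
    {
        struct entry *e;

        pthread_mutex_lock(&list_lock);
        for (e = list_head; e; e = e->next)
            if (e->key == key)
                goto unlock;               /* found: reuse it */

        e = calloc(1, sizeof(*e));
        if (e) {
            e->key = key;
            e->next = list_head;           /* insert under the same lock */
            list_head = e;
        }
    unlock:
        pthread_mutex_unlock(&list_lock);
        return e;
    }
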
index 8da3c9336111869b707dc29f6c6bd5e0258dd4dc..3ccc75ee719cf4eab27022d38754611a151805d7 100644 (file)
@@ -25,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index c3578444f3cbe759a5385ac460ccb9d41ae1c4de..34caf129a9bf5531360f798be6a7059bad26a50f 100644 (file)
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
        spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
        struct list_head *list;
 
+       /* Select ingoing or outgoing coding node */
+       if (in_coding) {
+               lock = &orig_neigh_node->in_coding_list_lock;
+               list = &orig_neigh_node->in_coding_list;
+       } else {
+               lock = &orig_neigh_node->out_coding_list_lock;
+               list = &orig_neigh_node->out_coding_list;
+       }
+
+       spin_lock_bh(lock);
+
        /* Check if nc_node is already added */
        nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
 
        /* Node found */
        if (nc_node)
-               return nc_node;
+               goto unlock;
 
        nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
        if (!nc_node)
-               return NULL;
+               goto unlock;
 
        /* Initialize nc_node */
        INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
        kref_get(&orig_neigh_node->refcount);
        nc_node->orig_node = orig_neigh_node;
 
-       /* Select ingoing or outgoing coding node */
-       if (in_coding) {
-               lock = &orig_neigh_node->in_coding_list_lock;
-               list = &orig_neigh_node->in_coding_list;
-       } else {
-               lock = &orig_neigh_node->out_coding_list_lock;
-               list = &orig_neigh_node->out_coding_list;
-       }
-
        batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
                   nc_node->addr, nc_node->orig_node->orig);
 
        /* Add nc_node to orig_node */
-       spin_lock_bh(lock);
        kref_get(&nc_node->refcount);
        list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
        spin_unlock_bh(lock);
 
        return nc_node;
index 1485263a348bb50478ddc91f2bdd21dcc52db4d1..626ddca332db5f07fc79b1d0d2543b9529b85437 100644 (file)
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        struct batadv_softif_vlan *vlan;
        int err;
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (vlan) {
                batadv_softif_vlan_put(vlan);
+               spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
                return -EEXIST;
        }
 
        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
-       if (!vlan)
+       if (!vlan) {
+               spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
                return -ENOMEM;
+       }
 
        vlan->bat_priv = bat_priv;
        vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 
        atomic_set(&vlan->ap_isolation, 0);
 
+       kref_get(&vlan->refcount);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+        * sleeping behavior of the sysfs functions and the fs_reclaim lock
+        */
        err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
        if (err) {
-               kfree(vlan);
+               /* ref for the function */
+               batadv_softif_vlan_put(vlan);
+
+               /* ref for the list */
+               batadv_softif_vlan_put(vlan);
                return err;
        }
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       kref_get(&vlan->refcount);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
index f2eef43bd2ec5b798ba552ff14eedcfa734b39d6..09427fc6494a157554d8b19f3481a878a9f97bba 100644 (file)
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
                                                                        \
        return __batadv_store_uint_attr(buff, count, _min, _max,        \
                                        _post_func, attr,               \
-                                       &bat_priv->_var, net_dev);      \
+                                       &bat_priv->_var, net_dev,       \
+                                       NULL);  \
 }
 
 #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)                         \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
                                                                        \
        length = __batadv_store_uint_attr(buff, count, _min, _max,      \
                                          _post_func, attr,             \
-                                         &hard_iface->_var, net_dev);  \
+                                         &hard_iface->_var,            \
+                                         hard_iface->soft_iface,       \
+                                         net_dev);                     \
                                                                        \
        batadv_hardif_put(hard_iface);                          \
        return length;                                                  \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
 
 static int batadv_store_uint_attr(const char *buff, size_t count,
                                  struct net_device *net_dev,
+                                 struct net_device *slave_dev,
                                  const char *attr_name,
                                  unsigned int min, unsigned int max,
                                  atomic_t *attr)
 {
+       char ifname[IFNAMSIZ + 3] = "";
        unsigned long uint_val;
        int ret;
 
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
        if (atomic_read(attr) == uint_val)
                return count;
 
-       batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
-                   attr_name, atomic_read(attr), uint_val);
+       if (slave_dev)
+               snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+       batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+                   attr_name, ifname, atomic_read(attr), uint_val);
 
        atomic_set(attr, uint_val);
        return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
                                        void (*post_func)(struct net_device *),
                                        const struct attribute *attr,
                                        atomic_t *attr_store,
-                                       struct net_device *net_dev)
+                                       struct net_device *net_dev,
+                                       struct net_device *slave_dev)
 {
        int ret;
 
-       ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
-                                    attr_store);
+       ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+                                    attr->name, min, max, attr_store);
        if (post_func && ret)
                post_func(net_dev);
 
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
        return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
                                        batadv_post_gw_reselect, attr,
                                        &bat_priv->gw.sel_class,
-                                       bat_priv->soft_iface);
+                                       bat_priv->soft_iface, NULL);
 }
 
 static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
        if (old_tp_override == tp_override)
                goto out;
 
-       batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
-                   "throughput_override",
+       batadv_info(hard_iface->soft_iface,
+                   "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+                   "throughput_override", net_dev->name,
                    old_tp_override / 10, old_tp_override % 10,
                    tp_override / 10, tp_override % 10);
 
index 12a2b7d21376721d15c6a31f3e794e4270d74b5c..d21624c446655d57786a1bcfa45aaf57c5ce9701 100644 (file)
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
+       spin_lock_bh(&tt_global->list_lock);
+
        orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
        if (orig_entry) {
                /* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        orig_entry->flags = flags;
        kref_init(&orig_entry->refcount);
 
-       spin_lock_bh(&tt_global->list_lock);
        kref_get(&orig_entry->refcount);
        hlist_add_head_rcu(&orig_entry->list,
                           &tt_global->orig_list);
-       spin_unlock_bh(&tt_global->list_lock);
        atomic_inc(&tt_global->orig_list_count);
 
 sync_flags:
@@ -1647,6 +1647,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 out:
        if (orig_entry)
                batadv_tt_orig_list_entry_put(orig_entry);
+
+       spin_unlock_bh(&tt_global->list_lock);
 }
 
 /**
index a637458205d16bf838f796383d8cc15ac861801b..40e69c9346d22c09481544b8b4dec56cad88b64a 100644 (file)
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 {
        struct batadv_tvlv_handler *tvlv_handler;
 
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (tvlv_handler) {
+               spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                batadv_tvlv_handler_put(tvlv_handler);
                return;
        }
 
        tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
-       if (!tvlv_handler)
+       if (!tvlv_handler) {
+               spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                return;
+       }
 
        tvlv_handler->ogm_handler = optr;
        tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
        kref_init(&tvlv_handler->refcount);
        INIT_HLIST_NODE(&tvlv_handler->list);
 
-       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        kref_get(&tvlv_handler->refcount);
        hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
index 65fc366a78a4c455a02e74bdc30c9249698eb488..8c0ed225e2801a741f81eaa6a626eb191884f0aa 100644 (file)
@@ -2592,7 +2592,7 @@ static int devlink_resource_fill(struct genl_info *info,
        if (!nlh) {
                err = devlink_dpipe_send_and_alloc_skb(&skb, info);
                if (err)
-                       goto err_skb_send_alloc;
+                       return err;
                goto send_done;
        }
        return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ static int devlink_resource_fill(struct genl_info *info,
 nla_put_failure:
        err = -EMSGSIZE;
 err_resource_put:
-err_skb_send_alloc:
        nlmsg_free(skb);
        return err;
 }
index c9993c6c2fd4f492d5113d9c328c7bf3ddcfa9f3..234a0ec2e9327727e95a76f44a29e8cc93feff84 100644 (file)
@@ -2624,6 +2624,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GPHYSTATS:
        case ETHTOOL_GTSO:
        case ETHTOOL_GPERMADDR:
+       case ETHTOOL_GUFO:
        case ETHTOOL_GGSO:
        case ETHTOOL_GGRO:
        case ETHTOOL_GFLAGS:
index 57557a6a950cc9cdff959391576a03381d328c1a..3219a2932463096566ce8ff336ecdf699422dd65 100644 (file)
@@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev)
        }
 }
 
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
 {
-       const struct net_device_ops *ops;
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+       const struct net_device_ops *ops;
 
        /* Don't do any rx activity if the dev_lock mutex is held
         * the dev_open/close paths use this to block netpoll activity
         * while changing device state
         */
-       if (down_trylock(&ni->dev_lock))
+       if (!ni || down_trylock(&ni->dev_lock))
                return;
 
        if (!netif_running(dev)) {
@@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev)
        }
 
        ops = dev->netdev_ops;
-       if (!ops->ndo_poll_controller) {
-               up(&ni->dev_lock);
-               return;
-       }
-
-       /* Process pending work on NIC */
-       ops->ndo_poll_controller(dev);
+       if (ops->ndo_poll_controller)
+               ops->ndo_poll_controller(dev);
 
        poll_napi(dev);
 
@@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
        zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll_disable(struct net_device *dev)
 {
@@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
-       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-           !ndev->netdev_ops->ndo_poll_controller) {
+       if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       np->dev_name);
                err = -ENOTSUPP;
index c4f5602308edca064297fe8764764f65ebe84569..284a22154b4e6cb129ca7dfd127ce11e306dba19 100644 (file)
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       unsigned int inner_nhdr_len = 0;
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8     tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        __be32 dst;
        bool connected;
 
+       /* ensure we can access the inner net header, for several users below */
+       if (skb->protocol == htons(ETH_P_IP))
+               inner_nhdr_len = sizeof(struct iphdr);
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner_nhdr_len = sizeof(struct ipv6hdr);
+       if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+               goto tx_error;
+
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);
 
index d51a8c0b3372d09ad1c78b76f94b4ebaf7ca3f61..c63ccce6425fb221170b058e701c11b7ba9a497e 100644 (file)
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
                                p++;
                                continue;
                        }
-                       state->offset++;
                        return ifa;
                }
 
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
                return ifa;
        }
 
+       state->offset = 0;
        while (++state->bucket < IN6_ADDR_HSIZE) {
-               state->offset = 0;
                hlist_for_each_entry_rcu(ifa,
                                     &inet6_addr_lst[state->bucket], addr_lst) {
                        if (!net_eq(dev_net(ifa->idev->dev), net))
                                continue;
-                       state->offset++;
                        return ifa;
                }
        }
index 419960b0ba16370ec9c47bee16aad15d9819f349..a0b6932c3afd23c5b1b0dded8190f9a74ef274f6 100644 (file)
@@ -1234,7 +1234,7 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       const struct iphdr  *iph = ip_hdr(skb);
+       const struct iphdr  *iph;
        int encap_limit = -1;
        struct flowi6 fl6;
        __u8 dsfield;
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       /* ensure we can access the full inner ip header */
+       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+               return -1;
+
+       iph = ip_hdr(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
        tproto = READ_ONCE(t->parms.proto);
@@ -1306,7 +1311,7 @@ static inline int
 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        int encap_limit = -1;
        __u16 offset;
        struct flowi6 fl6;
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+               return -1;
+
+       ipv6h = ipv6_hdr(skb);
        tproto = READ_ONCE(t->parms.proto);
        if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
            ip6_tnl_addr_conflict(t, ipv6h))
index 480a79f47c52383e3e254b7ca3e7bd16e473da2f..826b14de7dbbc8d1e2100820374654e5722c32b6 100644 (file)
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
+       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct fib6_info *from;
        struct inet6_dev *idev;
 
-       dst_destroy_metrics_generic(dst);
+       if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+               kfree(p);
+
        rt6_uncached_list_del(rt);
 
        idev = rt->rt6i_idev;
@@ -976,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+       if (from->fib6_metrics != &dst_default_metrics) {
+               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+               refcount_inc(&from->fib6_metrics->refcnt);
+       }
 }
 
 /* Caller must already hold reference to @ort */
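
The two hunks above give rt6_info metrics the classic shared-default refcounting scheme: one static default block is never counted or freed, while any private block gets a reference per user and is kfree()d by whichever user drops the last one (DST_METRICS_REFCOUNTED marks dst entries that hold such a reference). A reduced model (names illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct metrics {
        atomic_int refcnt;
        unsigned values[16];
    };

    static struct metrics default_metrics;      /* static: never freed */

    static struct metrics *metrics_share(struct metrics *m)
    {
        if (m != &default_metrics)              /* only real blocks count */
            atomic_fetch_add(&m->refcnt, 1);
        return m;
    }

    static void metrics_release(struct metrics *m)
    {
        if (m != &default_metrics &&
            atomic_fetch_sub(&m->refcnt, 1) == 1)
            free(m);                            /* last user frees */
    }
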
index 7a4de6d618b169d0f72b04386965b6e02ba27ada..8fbe6cdbe255d4d32b790baa22e1431d240f6e7f 100644 (file)
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        unsigned int flags;
 
        if (event == NETDEV_REGISTER) {
-               /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+               /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+                * IPIP devices
+                */
                if (dev->type == ARPHRD_ETHER ||
                    dev->type == ARPHRD_LOOPBACK ||
                    dev->type == ARPHRD_IPGRE ||
+                   dev->type == ARPHRD_IP6GRE ||
                    dev->type == ARPHRD_SIT ||
                    dev->type == ARPHRD_TUNNEL) {
                        mdev = mpls_add_dev(dev);
index c070dfc0190aa2bd84871eee50596a3d7c735636..c92894c3e40a34b34bdf533b1712fd1c1909595e 100644 (file)
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
 {
        u32 addr_len;
 
-       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+           info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
                addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
                if (addr_len != sizeof(struct in_addr) &&
                    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
index ac8030c4bcf8638f7b77873f21285c88795a62f4..19cb2e473ea607af8caa56848c9976baaf88acf4 100644 (file)
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                }
                create_info = (struct hci_create_pipe_resp *)skb->data;
 
+               if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+
                /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
                 * but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                }
                delete_info = (struct hci_delete_pipe_noti *)skb->data;
 
+               if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+
                hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
                hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
                break;
index 73427ff439f90fc069ac44e53a5f8e2597a85bc6..71ff356ee7020d881b35d71747076f28530cb5e7 100644 (file)
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
 
 /* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
 #define rds_ib_stats_add(member, count) \
                rds_stats_add_which(rds_ib_stats, member, count)
index 12cac85da994356ef24cf264e1fb8451f2e303dc..033696e6f74fbea97392059d243a078af925e832 100644 (file)
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
        struct dst_entry *dst = sctp_transport_dst_check(t);
+       struct sock *sk = t->asoc->base.sk;
        bool change = true;
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
        pmtu = SCTP_TRUNC4(pmtu);
 
        if (dst) {
-               dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+               struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+               union sctp_addr addr;
+
+               pf->af->from_sk(&addr, sk);
+               pf->to_sk_daddr(&t->ipaddr, sk);
+               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+               pf->to_sk_daddr(&addr, sk);
+
                dst = sctp_transport_dst_check(t);
        }
 
        if (!dst) {
-               t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+               t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
                dst = t->dst;
        }
 
index 2d8a1e15e4f95ffb9ec33d48387e0832f2166ccf..015231789ed2edf55f3cd99978496b2b5e9d0bac 100644 (file)
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       smc->sk.sk_state_change(&smc->sk);
+       if (smc->sk.sk_err)
+               smc->sk.sk_state_change(&smc->sk);
+       else
+               smc->sk.sk_write_space(&smc->sk);
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
 }
 
 /* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
-                                  struct smc_clc_msg_accept_confirm *cclc,
-                                  int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+                                 struct smc_clc_msg_accept_confirm *cclc,
+                                 int local_contact)
 {
        struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
        int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
                if (reason_code)
                        goto decline;
        }
-       return;
+       return 0;
 
 decline:
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_decline(new_smc, reason_code, local_contact);
+       return reason_code;
 }
 
 /* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
        }
 
        /* finish worker */
-       if (!ism_supported)
-               smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+       if (!ism_supported) {
+               if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+                       return;
+       }
        smc_conn_save_peer_info(new_smc, &cclc);
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+       if (smc->use_fallback) {
                /* delegate to CLC child sock */
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                                mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
                        if (sk->sk_state == SMC_APPCLOSEWAIT1)
                                mask |= EPOLLIN;
+                       if (smc->conn.urg_state == SMC_URG_VALID)
+                               mask |= EPOLLPRI;
                }
-               if (smc->conn.urg_state == SMC_URG_VALID)
-                       mask |= EPOLLPRI;
        }
 
        return mask;
index 83aba9ade060a10d3df5452b7c07648eb7759df8..52241d679cc91cd3b6ec59a4388d33b68c76f1f1 100644 (file)
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
        vec[i++].iov_len = sizeof(trl);
        /* due to the few bytes needed for clc-handshake this cannot block */
        len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
-       if (len < sizeof(pclc)) {
-               if (len >= 0) {
-                       reason_code = -ENETUNREACH;
-                       smc->sk.sk_err = -reason_code;
-               } else {
-                       smc->sk.sk_err = smc->clcsock->sk->sk_err;
-                       reason_code = -smc->sk.sk_err;
-               }
+       if (len < 0) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               reason_code = -smc->sk.sk_err;
+       } else if (len < (int)sizeof(pclc)) {
+               reason_code = -ENETUNREACH;
+               smc->sk.sk_err = -reason_code;
        }
 
        return reason_code;
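
The rewritten branch separates the two failure modes of kernel_sendmsg(): a negative return is a transport error and the clcsock's own error code is propagated, while a non-negative but short write of this small handshake message is treated as the peer being unreachable. The same two-branch shape in a plain send() wrapper (send_exact is an illustrative helper, not SMC code):

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static int send_exact(int fd, const void *buf, size_t len)
    {
        ssize_t n = send(fd, buf, len, 0);

        if (n < 0)
            return -errno;           /* transport error: report as-is */
        if ((size_t)n < len)
            return -ENETUNREACH;     /* short write: peer unreachable */
        return 0;
    }
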
index ac961dfb1ea1b775b666be3fdc0f292545703533..ea2b87f29469610c1afaf5e043a935c24589425b 100644 (file)
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
        struct smc_cdc_conn_state_flags *txflags =
                &smc->conn.local_tx_ctrl.conn_state_flags;
 
-       sk->sk_err = ECONNABORTED;
-       if (smc->clcsock && smc->clcsock->sk) {
-               smc->clcsock->sk->sk_err = ECONNABORTED;
-               smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+       if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+               sk->sk_err = ECONNABORTED;
+               if (smc->clcsock && smc->clcsock->sk) {
+                       smc->clcsock->sk->sk_err = ECONNABORTED;
+                       smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+               }
        }
        switch (sk->sk_state) {
-       case SMC_INIT:
-               sk->sk_state = SMC_PEERABORTWAIT;
-               break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
                release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
        case SMC_PEERFINCLOSEWAIT:
                sock_put(sk); /* passive closing */
                break;
+       case SMC_INIT:
        case SMC_PEERABORTWAIT:
        case SMC_CLOSED:
                break;
index 01c6ce042a1cdb338d81167b1a495693a2a22154..7cb3e4f07c10f5da154c0322ef4fae54a6dc85e2 100644 (file)
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
 };
 
 /* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
        .hdrsize = 0,
        .name = SMCR_GENL_FAMILY_NAME,
        .version = SMCR_GENL_FAMILY_VERSION,
index 3b602a1e27fa224e3a7628436ac6f93309a54c4a..711e89d8c4153b6123678772e83efe1092f4ae1f 100644 (file)
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
        }
        dh_inputs.g_size = dlen;
 
-       dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+       dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
        if (dlen < 0) {
                ret = dlen;
                goto out2;
index 664ced8cb1b041918b3c06a3df58a3940aa640e1..e907ba6f15e532b678d5aa850de09aefeaa1b79d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
 /* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #ifndef __TOOLS_LIBC_COMPAT_H
index 6f54f84144a00940a54484ec870be6ed1104f314..9b552c0fc47db8cded0fda2a53e40f9d5d42a036 100644 (file)
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
        /* Test update without programs */
        for (i = 0; i < 6; i++) {
                err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
-               if (err) {
+               if (i < 2 && !err) {
+                       printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+                              i, sfd[i]);
+                       goto out_sockmap;
+               } else if (i >= 2 && err) {
                        printf("Failed noprog update sockmap '%i:%i'\n",
                               i, sfd[i]);
                        goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
        }
 
        /* Test map update elem afterwards fd lives in fd and map_fd */
-       for (i = 0; i < 6; i++) {
+       for (i = 2; i < 6; i++) {
                err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
                if (err) {
                        printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
        }
 
        /* Delete the elems without programs */
-       for (i = 0; i < 6; i++) {
+       for (i = 2; i < 6; i++) {
                err = bpf_map_delete_elem(fd, &i);
                if (err) {
                        printf("Failed delete sockmap %i '%i:%i'\n",
index 32a194e3e07a5a60d4c00dfdcfd25ad31ef38b6e..0ab9423d009f5b8c189dddb4a1f8f81c8a3a2101 100755 (executable)
@@ -178,8 +178,8 @@ setup() {
 
 cleanup() {
        [ ${cleanup_done} -eq 1 ] && return
-       ip netns del ${NS_A} 2 > /dev/null
-       ip netns del ${NS_B} 2 > /dev/null
+       ip netns del ${NS_A} 2> /dev/null
+       ip netns del ${NS_B} 2> /dev/null
        cleanup_done=1
 }
 
index 93baacab7693c48576e4872c8d31287931b73091..d056486f49de5eacad70b4813287094e5e597927 100644 (file)
@@ -1,5 +1,6 @@
 TEST_GEN_PROGS := copy_first_unaligned alignment_handler
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index b4d7432a0ecd1b4af5fa5fe2071276172a684dc5..d40300a65b42f79ba4c48705d909906ed9115dd0 100644 (file)
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
 
 CFLAGS += -O2
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 1be547434a49c3feb8f7e40c03d4b5260a3b30d4..ede4d3dae7505ef31f822bac9b613aef464f6125 100644 (file)
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 1cf89a34d97ca35299bf62b588b07d61ecbbb8a8..44574f3818b3d71d51a2021412ca70420b4543e2 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
 
 EXTRA_SOURCES := validate.c ../harness.c stubs.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/copyuser_64_t%:      copyuser_64.S $(EXTRA_SOURCES)
index 55d7db7a616bcd7661fceeb470ecb6f153cd9eac..5df476364b4d46dd1563889bbd86864622c29ffd 100644 (file)
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test   \
              dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test  \
              dscr_sysfs_thread_test
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
index 0dd3a01fdab92bc887ddc70cda06fd20c89c5154..11a10d7a2bbd9f1c93ccbf309ac5d0dc9d688f2b 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 8ebbe96d80a8452575fb234b6eba23a19e2c3a4e..33ced6e0ad25e07047e19699c90303ed97d157fa 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
 TEST_GEN_FILES := tempfile
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 6e1629bf5b09dbfb850acdb0f0c60aafdc6e3ea6..19046db995fee387a77f9fd2f2d130078c37cb71 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
 EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_GEN_PROGS) ebb
index c4e64bc2e2650a2e02fac4a6f891e973a126eced..bd5dfa509272a75b97b1dbf8ac2be00c0a1ab1cc 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test   \
         lost_exception_test no_handler_test                    \
         cycles_with_mmcr2_test
 
+top_srcdir = ../../../../../..
 include ../../../lib.mk
 
 $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
index 175366db7be8bc1261194ac80d009cd33e0c9c59..ea2b7bd09e369c4fb679d1baeab71e80712af186 100644 (file)
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
 
 TEST_GEN_PROGS := load_unaligned_zeropad
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 28f5b781a553f4899e004e5d1450ab4b92de3407..923d531265f8c22d3adf2442e0a48fc7c1fffb5d 100644 (file)
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_PROGS)
index a7cbd5082e27175822bb2d351b7ebdbf9c0e345d..1fca25c6ace067ffb7a913508b4e13059cb04770 100644 (file)
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
 CFLAGS += -maltivec
 signal_tm: CFLAGS += -mhtm
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 10b35c87a4f4e316315c977b09e2b3bc6ef2b73c..7fc0623d85c314636be8cd3d393fb6c35c643edb 100644 (file)
@@ -29,6 +29,7 @@ endif
 
 ASFLAGS = $(CFLAGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): $(EXTRA_SOURCES)
index 30b8ff8fb82e7a161759bf68363e69dcbcde2304..fcd2dcb8972babf90209b699307bd086f08c5f90 100644 (file)
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
 
 EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
index da22ca7c38c185a5bb4df90a81fa71a9fbccc6c1..161b8846336fdb324f97833ce821babd9c139eae 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
 
 CFLAGS += -I../../../../../usr/include
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index c0e45d2dde25d115b73ae2e14002a8b24167abbc..9fc2cf6fbc92c9214f9978dec423c23147ca359b 100644 (file)
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
        tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
        $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index f8ced26748f84408d902cc4ce58012874c545a58..fb82068c9fda297e1c505c440d7cbe112119b581 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
 
 CFLAGS += -m64
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c